text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
import os
from flask import Flask, url_for, request, render_template, jsonify, send_file
from werkzeug.utils import secure_filename
import deepchem as dc
import subprocess
from shutil import copyfile
import csv
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
# Directory holding the bundled web assets; served by Flask at /static.
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static/')
DEEPCHEM_GUI = Flask('deepchem-gui', static_folder=STATIC_DIR,
                     static_url_path='/static',
                     template_folder=os.path.join(STATIC_DIR, 'deepchem-gui',
                                                  'templates')
                     )
# Uploaded and generated files are written under static/data so they can be
# handed back to the browser via url_for('static', filename='data/...').
UPLOAD_DIR = os.path.join(STATIC_DIR, "data/")
if not os.path.isdir(UPLOAD_DIR):
    os.mkdir(UPLOAD_DIR)
    print("Created data directory")
# serve ngl webapp clone
@DEEPCHEM_GUI.route('/')
def webapp():
    """Serve the single-page web application (NGL viewer clone)."""
    return render_template('webapp.html')
# download protein and ligand files
def _save_upload(upload_file):
    """Persist one uploaded file into UPLOAD_DIR and return its local path."""
    target_fn = os.path.join(UPLOAD_DIR, secure_filename(upload_file.filename))
    upload_file.save(target_fn)
    return target_fn


def _read_csv_rows(csv_fn):
    """Read a comma-delimited CSV file into a list of row lists.

    Uses a context manager so the handle is closed even on parse errors
    (the original code leaked the open file object).
    """
    with open(csv_fn, 'r') as csvfile:
        return list(csv.reader(csvfile, delimiter=','))


@DEEPCHEM_GUI.route('/upload', methods=['POST'])
def upload():
    """Handle uploads of protein/ligand pairs, SMILES CSVs or SMARTS CSVs.

    Exactly one of three payloads is expected:
      * 'proteins' + 'ligands' files -> dock every ligand against every
        protein and return the docking results as JSON.
      * a 'smiles' CSV -> render 2-D depictions and return augmented rows.
      * a 'smarts' CSV -> run the reactions and return augmented rows.
    Returns a JSON error message for any other combination.
    """
    if request.method != 'POST':
        # Route only registers POST, so this branch is effectively unreachable;
        # kept to preserve the original contract.
        raise NotImplementedError

    proteins = request.files.getlist('proteins')
    ligands = request.files.getlist('ligands')
    smiles = request.files.getlist('smiles')
    smarts = request.files.getlist('smarts')

    if proteins and ligands:
        protein_fns = [_save_upload(protein) for protein in proteins]
        ligand_fns = [_save_upload(ligand) for ligand in ligands]

        docking_result = dock(protein_fns, ligand_fns)
        print(docking_result)

        # Copy docked outputs into the static data dir and rewrite the
        # result entries as URLs the browser can fetch.
        for i in range(len(protein_fns)):
            for j in range(len(ligand_fns)):
                for key in ('protein', 'ligand'):
                    src_fn = docking_result[i][j][key]
                    basename = src_fn.split("/")[-1]
                    copyfile(src_fn, os.path.join(UPLOAD_DIR, basename))
                    docking_result[i][j][key] = url_for(
                        'static', filename="data/" + basename)
        return jsonify(docking_result)
    elif smiles:
        data = _read_csv_rows(_save_upload(smiles[0]))
        return jsonify(render_smiles(data))
    elif smarts:
        data = _read_csv_rows(_save_upload(smarts[0]))
        return jsonify(render_smarts(data))
    else:
        return jsonify(error_msg="Invalid file transfer.")
def render_smiles(data):
    """Render a 2-D depiction for every SMILES row of a parsed CSV.

    Parameters:
        data: list of rows; data[0] is the header and must contain a
            "SMILES" column.
    Returns:
        The same list, with a "SMILES IMG" column appended: a static URL
        to the rendered PNG, or the string "Invalid" when rendering fails.
    """
    # list.index replaces the original hand-rolled comprehension; both
    # raise if the column is missing.
    smiles_col_idx = data[0].index("SMILES")
    for i, row in enumerate(data):
        if i == 0:
            row.append("SMILES IMG")
            continue
        try:
            mol = Chem.MolFromSmiles(row[smiles_col_idx])
            AllChem.Compute2DCoords(mol)
            smiles_fn = 'smiles_%d.png' % i
            Draw.MolToFile(mol, os.path.join(UPLOAD_DIR, smiles_fn))
            row.append(url_for('static', filename='data/' + smiles_fn))
        except Exception as e:
            # MolFromSmiles returns None for bad input, which makes
            # Compute2DCoords raise; mark the row rather than abort.
            print(e)
            row.append("Invalid")
    return data
def render_smarts(data):
    """Run a SMARTS reaction per row and render reactants and product.

    Parameters:
        data: list of rows; data[0] is the header and must contain
            "SMARTS", "SMILES_1" and "SMILES_2" columns.
    Returns:
        The same list with four columns appended per row, in header order:
        PRODUCT (SMILES string), SMILES_1 IMG, SMILES_2 IMG, PRODUCT IMG.
        Failed rows get "Invalid" in all four columns.

    Bug fixes vs. the original:
      * image URLs are appended in the same order as the header declares
        (the original appended product/smiles_1/smiles_2, misaligning the
        columns), and
      * the error path now fills all four columns (the original appended
        only three "Invalid" entries, and could leave partial appends).
    """
    header = data[0]
    smarts_col_idx = header.index("SMARTS")
    smiles_col_idx_1 = header.index("SMILES_1")
    smiles_col_idx_2 = header.index("SMILES_2")
    for i, row in enumerate(data):
        if i == 0:
            row.extend(["PRODUCT", "SMILES_1 IMG", "SMILES_2 IMG",
                        "PRODUCT IMG"])
            continue
        base_len = len(row)
        try:
            rxn = AllChem.ReactionFromSmarts(row[smarts_col_idx])
            mol_1 = Chem.MolFromSmiles(row[smiles_col_idx_1])
            mol_2 = Chem.MolFromSmiles(row[smiles_col_idx_2])
            product = rxn.RunReactants((mol_1, mol_2))[0][0]
            row.append(Chem.MolToSmiles(product))
            # Render in header order: SMILES_1, SMILES_2, PRODUCT.
            for tag, mol in (('smiles_1', mol_1), ('smiles_2', mol_2),
                             ('product', product)):
                AllChem.Compute2DCoords(mol)
                img_fn = '%s_%d.png' % (tag, i)
                Draw.MolToFile(mol, os.path.join(UPLOAD_DIR, img_fn))
                row.append(url_for('static', filename='data/' + img_fn))
        except Exception as e:
            print(e)
            # Discard any partial appends, then pad all four columns.
            del row[base_len:]
            row.extend(["Invalid"] * 4)
    return data
def dock(protein_fns, ligand_fns):
    """Dock every ligand against every protein with DeepChem's Vina docker.

    Parameters:
        protein_fns: list of protein file paths.
        ligand_fns: list of ligand file paths.
    Returns:
        A len(protein_fns) x len(ligand_fns) nested list of dicts with
        keys 'score', 'protein' (docked protein path) and 'ligand'
        (docked ligand .pdb path, with qt records stripped).
    """
    docking_result = [[{} for _ in ligand_fns] for _ in protein_fns]
    # The docker is stateless across pairs; build it (and the helper-script
    # path) once instead of inside the inner loop.
    docker = dc.dock.VinaGridDNNDocker(
        exhaustiveness=1, detect_pockets=False)
    stripqt_sh = os.path.join(STATIC_DIR, 'deepchem-gui', 'scripts',
                              'stripqt.sh')
    for i, protein_fn in enumerate(protein_fns):
        for j, ligand_fn in enumerate(ligand_fns):
            print("Docking: %s to %s" % (ligand_fn, protein_fn))
            (score, (protein_docked, ligand_docked)
             ) = docker.dock(protein_fn, ligand_fn)
            print("Scores: %f" % (score))
            print("Docked protein: %s" % (protein_docked))
            print("Docked ligand: %s" % (ligand_docked))
            ligand_docked_fn = ligand_docked.replace(".pdbqt", "")
            # Argument list + shell=False avoids shell interpolation of
            # the (user-derived) file name.
            subprocess.call(["csh", stripqt_sh, ligand_docked_fn])
            ligand_docked_pdb = ligand_docked_fn + ".pdb"
            docking_result[i][j] = {'score': score[0],
                                    'protein': protein_docked,
                                    'ligand': ligand_docked_pdb}
    return docking_result
|
deepchem/deepchem-gui
|
gui/app.py
|
Python
|
gpl-3.0
| 8,020
|
[
"RDKit"
] |
ba90d6b1f8a77147a62fdc984205db052fd55458bfaf686e23831f3fed5f6bc0
|
"""Module containing the nextera pipeline."""
import logging
from pathlib import Path
import pysam
import toolz
from pyim.external.bowtie2 import bowtie2
from pyim.external.cutadapt import cutadapt, cutadapt_summary
from pyim.external.util import flatten_arguments
from pyim.model import Insertion
from pyim.util.path import WorkDirectory, shorten_path, extract_suffix
from .base import Aligner, PairedEndCommand
from ..util import AlignmentSummary
class NexteraAligner(Aligner):
    """Nextera-based transposon pipeline.

    Analyzes paired-end sequence data that was prepared using a Nextera-based
    protocol. Sequence reads are expected to have the following structure::

        Mate 1:
            [Genomic]

        Mate 2:
            [Transposon][Genomic]

    Here, ``transposon`` refers to the flanking part of the transposon sequence
    and ``genomic`` refers to the genomic DNA located between the transposon
    sequence and the used adapt sequence. Note that the adapter itself is not
    sequenced and therefore not part of the reads. However, the end of Mate 1
    is considered to terminate at the adapter and as such represents the
    breakpoint between the genomic DNA and the adapter.

    The pipeline essentially performs the following steps:

        - Mates are trimmed to remove the transposon sequence, dropping any
          reads not containing the transposon.
        - The remaining mates are trimmed to remove any sequences from
          the Nextera transposase.
        - The trimmed mates are aligned to the reference genome.
        - The resulting alignment is used to identify insertions.

    Parameters
    ----------
    transposon_path : Path
        Path to the (flanking) transposon sequence (fasta).
    bowtie_index_path : Path
        Path to the bowtie index.
    bowtie_options : Dict[str, Any]
        Dictionary of extra options for Bowtie.
    min_length : int
        Minimum length for genomic reads to be kept for alignment.
    min_support : int
        Minimum support for insertions to be kept in the final output.
    min_mapq : int
        Minimum mapping quality of alignments to be used for
        identifying insertions.
    merge_distance : int
        Maximum distance within which insertions are merged. Used to merge
        insertions that occur within close vicinity, which is typically due
        to slight variations in alignments.
    threads : int
        The number of threads to use for the alignment.
    sample_name : str
        Sample name attached to extracted insertions (via ``sample_func``).
    logger : logging.Logger
        Logger for progress messages; defaults to the root logger.
    """

    def __init__(self,
                 transposon_path,
                 bowtie_index_path,
                 bowtie_options=None,
                 min_length=15,
                 min_support=2,
                 min_mapq=23,
                 merge_distance=None,
                 threads=1,
                 sample_name=None,
                 logger=None):
        super().__init__()
        self._transposon_path = transposon_path
        self._index_path = bowtie_index_path
        self._bowtie_options = bowtie_options or {}
        self._min_length = min_length
        self._min_support = min_support
        self._min_mapq = min_mapq
        self._merge_distance = merge_distance
        self._threads = threads
        self._sample_name = sample_name
        self._logger = logger or logging.getLogger()

    def trim(self, read_paths, output_paths, work_dir=None):
        """Trims reads to remove transposon/nextera sequences.

        Trimmed mates are moved to ``output_paths``; intermediates live in
        ``work_dir`` (kept only when an explicit work_dir was supplied).
        """
        self._check_read_paths(read_paths)
        suffix = extract_suffix(read_paths[0])
        with WorkDirectory(work_dir, keep=work_dir is not None) as work_dir:
            # Select reads with transposon and trim sequence.
            trimmed_tr_paths = (work_dir / ('trimmed.transposon.R1' + suffix),
                                work_dir / ('trimmed.transposon.R2' + suffix))
            self._trim_transposon(read_paths, trimmed_tr_paths)
            # Trim nextera sequences if present.
            trimmed_nt_paths = (work_dir / ('trimmed.nextera.R1' + suffix),
                                work_dir / ('trimmed.nextera.R2' + suffix))
            self._trim_nextera(trimmed_tr_paths, trimmed_nt_paths)
            # Move outputs into position.
            for file_path, output_path in zip(trimmed_nt_paths, output_paths):
                file_path.rename(output_path)

    def _check_read_paths(self, read_paths):
        """Checks read paths input for validity (exactly two mate files)."""
        if len(read_paths) != 2:
            raise ValueError(self.__class__.__name__ +
                             ' only supports paired-end data')

    def _trim_transposon(self, read_paths, output_paths):
        """Selects and trims mates with transposon sequence in second read."""
        cutadapt_opts = {
            # -G: adapter (transposon fasta) matched against mate 2.
            '-G': 'file:' + str(self._transposon_path),
            # Drop pairs in which the transposon was not found.
            '--discard-untrimmed': True,
            '--pair-filter=both': True
        }
        process = cutadapt(
            read_path=read_paths[0],
            read2_path=read_paths[1],
            out_path=output_paths[0],
            out2_path=output_paths[1],
            options=cutadapt_opts)
        summary = cutadapt_summary(process.stdout, padding=' ')
        self._logger.info('Trimmed transposon sequence' + summary)

    def _trim_nextera(self, read_paths, output_paths):
        """Trims nextera sequences from mates and filters for min length."""
        cutadapt_opts = {
            # Nextera transposase sequence, trimmed from both mates.
            '-a': 'CTGTCTCTTATA',
            '-A': 'CTGTCTCTTATA',
            '--minimum-length': self._min_length,
        }
        process = cutadapt(
            read_path=read_paths[0],
            read2_path=read_paths[1],
            out_path=output_paths[0],
            out2_path=output_paths[1],
            options=cutadapt_opts)
        summary = cutadapt_summary(process.stdout, padding=' ')
        self._logger.info('Trimmed nextera sequences and '
                          'filtered for length' + summary)

    def align(self, read_paths, output_path):
        """Aligns mates to reference using bowtie2."""
        self._check_read_paths(read_paths)
        # User-supplied bowtie options, with the thread count forced on top.
        extra_opts = {'--threads': self._threads}
        options = toolz.merge(self._bowtie_options, extra_opts)
        bowtie2(
            read_paths=[read_paths[0]],
            read2_paths=[read_paths[1]],
            index_path=self._index_path,
            output_path=output_path,
            options=options,
            verbose=True)

    def extract(self, bam_path):
        """Extract insertions from alignment.

        Generator yielding Insertion objects built from properly paired,
        sufficiently mapped alignments.
        """
        bam_file = pysam.AlignmentFile(str(bam_path))
        try:
            summary = AlignmentSummary.from_alignments(
                iter(bam_file),
                position_func=self._position_for_mates,
                sample_func=lambda m1, m2: self._sample_name,
                min_mapq=self._min_mapq,
                paired=True)
        finally:
            # Always release the BAM handle, even if summarising fails.
            bam_file.close()
        if self._merge_distance is not None:
            summary = summary.merge_within_distance(self._merge_distance)
        insertions = summary.to_insertions(min_support=self._min_support)
        yield from insertions

    @staticmethod
    def _position_for_mates(mate1, mate2):
        """Returns transposon/linker positions for given mates.

        Returns ``((reference, transposon_pos, strand), linker_pos)``.
        NOTE(review): strand is +1 when mate1 is reverse and -1 otherwise —
        confirm this convention against the AlignmentSummary consumer.
        """
        ref = mate1.reference_name
        if mate1.is_reverse:
            transposon_pos = mate2.reference_start
            linker_pos = mate1.reference_end
            strand = 1
        else:
            transposon_pos = mate2.reference_end
            linker_pos = mate1.reference_start
            strand = -1
        return (ref, transposon_pos, strand), linker_pos

    def run(self, read_paths, work_dir=None):
        """Runs aligner on given read files: trim, align, then extract."""
        self._check_read_paths(read_paths)
        suffix = extract_suffix(read_paths[0])
        with WorkDirectory(work_dir, keep=work_dir is not None) as work_dir:
            # Trim reads and align to reference.
            trimmed_paths = (work_dir / ('trimmed.R1' + suffix),
                             work_dir / ('trimmed.R2' + suffix))
            self.trim(read_paths, trimmed_paths, work_dir=work_dir)
            alignment_path = work_dir / 'alignment.bam'
            self.align(trimmed_paths, output_path=alignment_path)
            # Extract insertions (materialised before the work dir vanishes).
            insertions = list(self.extract(alignment_path))
        return insertions
class NexteraCommand(PairedEndCommand):
    """Command-line entry point exposing the Nextera aligner as ``nextera``."""

    name = 'nextera'

    def configure(self, parser):
        """Register nextera-specific arguments on *parser* and return it."""
        super().configure(parser)

        parser.add_argument('--transposon', type=Path, required=True)
        parser.add_argument('--bowtie_index', type=Path, required=True)
        parser.add_argument('--sample_name', required=True)

        # Integer thresholds share the same shape; register them together.
        for flag, default in (('--min_length', 15),
                              ('--min_support', 2),
                              ('--min_mapq', 23)):
            parser.add_argument(flag, type=int, default=default)

        parser.add_argument('--merge_distance', type=int, default=None)
        parser.add_argument('--local', default=False, action='store_true')
        parser.add_argument('--work_dir', default=None)
        parser.add_argument('--threads', default=1, type=int)

        return parser

    def run(self, args):
        """Build an aligner from parsed *args*, run it, write insertions."""
        aligner = NexteraAligner(
            transposon_path=args.transposon,
            bowtie_index_path=args.bowtie_index,
            min_length=args.min_length,
            min_support=args.min_support,
            min_mapq=args.min_mapq,
            merge_distance=args.merge_distance,
            bowtie_options={'--local': args.local,
                            '--threads': args.threads},
            sample_name=args.sample_name,
            threads=args.threads)

        insertions = aligner.run(args.reads, work_dir=args.work_dir)

        # Make sure the destination directory exists before writing output.
        args.output.parent.mkdir(exist_ok=True, parents=True)
        Insertion.to_csv(args.output, insertions, sep='\t', index=False)
|
jrderuiter/pyim
|
src/pyim/align/aligners/nextera.py
|
Python
|
mit
| 10,533
|
[
"Bowtie",
"pysam"
] |
673540427a634d798478ed2647e1f03cfc4b868b63c7e5fa2904e9c7ceb855dc
|
"""
Create db from gdf files to be able to then select neuron spike times.
Best use case:
1. run simulation
2. create sqlite db of spike times with indexing
3. use this db many times
Creating index for db will dominate insertions for larger set of spike times.
[TODO] Check how much slower things are if index is created at start
[TODO] Simplify block_read. sqlite probably can covert for insert.
[TODO] Error checking
[TODO] sqlite optimizations
[TODO] Create read buffers once instead of for each file
[TODO] Spike times may be storable as int32 rather than float, save space
"""
import numpy as np
import sqlite3 as sqlite
import os, glob
from time import time as now
import matplotlib.pyplot as plt
# Start from matplotlib's defaults, then apply uniform font sizes and
# subplot spacing for every figure produced by this module.
plt.rcdefaults()
plt.rcParams.update({
    'font.size' : 16,
    'axes.labelsize' : 16,
    'axes.titlesize' : 16,
    'legend.fontsize' : 14,
    'xtick.labelsize' : 16,
    'ytick.labelsize' : 16,
    'figure.subplot.wspace' : 0.3,
    'figure.subplot.hspace' : 0.3,
})
class GDF(object):
    """
    Store gdf spike data in an sqlite database and query it.

    1. Read from gdf files
    2. Create sqlite db of (neuron, spike time)
    3. Query spike times for neurons
    """
    def __init__(self, dbname, bsize=int(1e6), new_db=True,
                 debug=False):
        """
        Parameters:
        ::

            dbname : str,
                filename of sqlite database
            bsize : int,
                number of spike times to insert at a time,
                determines memory usage
            new_db : bool,
                new database with name dbname, will overwrite
                any existing file of that name
            debug : bool,
                print timing information
        """
        if new_db:
            try:
                os.unlink(dbname)
            except:
                # NOTE(review): bare except — presumably intended for the
                # file-not-found case only; anything else is also swallowed.
                print 'creating new database file %s' % dbname
        self.conn = sqlite.connect(dbname)
        self.cursor = self.conn.cursor()
        self.bsize = bsize
        self.debug = debug

    def _blockread(self, fname):
        """
        Generator yields up to bsize split lines from a gdf file.

        Parameters:
        ::

            fname : str,
                name of gdf-file
        """
        with open(fname, 'rb') as f:
            while True:
                a = []
                for i in xrange(self.bsize):
                    line = f.readline()
                    if not line: break
                    a.append(line.split())
                # Exhausted file: terminate the generator (Python 2 idiom).
                if a == []: raise StopIteration
                yield a
            # Unreachable: the loop only exits via StopIteration, and the
            # with-statement already closes the file.
            f.close()

    def create(self, re='brunel-py-ex-*.gdf', index=True):
        """
        Create db from list of gdf file glob

        Parameters:
        ::

            re : str,
                file glob to load
            index : bool,
                create index on neurons for speed
        """
        self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)')
        tic = now()
        for f in glob.glob(re):
            print f
            # Retry the whole file until it inserts without error.
            # NOTE(review): the bare except will retry forever on a
            # persistent failure — confirm this is intentional.
            while True:
                try:
                    for data in self._blockread(f):
                        self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data)
                        self.conn.commit()
                except:
                    continue
                break
        toc = now()
        if self.debug: print 'Inserts took %g seconds.' % (toc-tic)
        # optionally, create index for speed
        if index:
            tic = now()
            self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)')
            toc = now()
            if self.debug: print 'Indexed db in %g seconds.' % (toc-tic)

    def create_from_list(self, re=[], index=True):
        '''
        create db from list of arrays

        Parameters:
        ::

            re : list,
                index of element is cell index, and element i an array
                of spike times in ms
            index : bool,
                create index on neurons for speed
        '''
        # NOTE(review): mutable default argument re=[] — harmless here since
        # it is never mutated, but a None default would be safer.
        self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)')
        tic = now()
        i = 0
        for x in re:
            # Pair every spike time of cell i with its index.
            data = zip([i] * len(x), x)
            self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data)
            i += 1
        self.conn.commit()
        toc = now()
        if self.debug: print 'Inserts took %g seconds.' % (toc-tic)
        # optionally, create index for speed
        if index:
            tic = now()
            self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)')
            toc = now()
            if self.debug: print 'Indexed db in %g seconds.' % (toc-tic)

    def select(self, neurons):
        """
        Select spike trains.

        Parameters:
        ::

            neurons : np.array or list of neurons

        Returns:
        ::

            s : list of np.array's
                spike times
        """
        s = []
        for neuron in neurons:
            # NOTE(review): value interpolated into the SQL string via %d —
            # safe only for integer ids; parameterized query preferable.
            self.cursor.execute('SELECT time FROM spikes where neuron = %d' % neuron)
            sel = self.cursor.fetchall()
            spikes = np.array(sel).flatten()
            s.append(spikes)
        return s

    def interval(self, T=[0, 1000]):
        """
        Get all spikes in a time interval T

        Parameters:
        ::

            T : list,
                time interval

        Returns:
        ::

            s : list,
                nested list with (neuron, time) rows
        """
        self.cursor.execute('SELECT * FROM spikes WHERE time BETWEEN %f AND %f' % tuple(T))
        sel = self.cursor.fetchall()
        return sel

    def select_neurons_interval(self, neurons, T=[0, 1000]):
        """
        Get all spikes from neurons in a time interval T.

        Parameters:
        ::

            neurons : np.array or list of neurons
            T : list,
                time interval

        Returns:
        ::

            s : list of np.array's,
                spike times per neuron
        """
        s = []
        for neuron in neurons:
            self.cursor.execute('SELECT time FROM spikes WHERE time BETWEEN %f AND %f and neuron = %d' % (T[0], T[1], neuron))
            sel = self.cursor.fetchall()
            spikes = np.array(sel).flatten()
            s.append(spikes)
        return s

    def neurons(self):
        """
        Return array of distinct neuron indices, ascending.

        Returns:
        ::

            np.array
        """
        self.cursor.execute('SELECT DISTINCT neuron FROM spikes ORDER BY neuron')
        sel = self.cursor.fetchall()
        return np.array(sel).flatten()

    def num_spikes(self):
        """
        Return total number of spikes.

        Returns:
        ::

            tuple — the single COUNT(*) row, e.g. (n,)
        """
        self.cursor.execute('SELECT Count(*) from spikes')
        # fetchall()[0] is a 1-tuple; '%d' % rows works because of that.
        rows = self.cursor.fetchall()[0]
        # Check against 'wc -l *ex*.gdf'
        if self.debug: print 'DB has %d spikes' % rows
        return rows

    def close(self):
        """
        Close the cursor and the underlying sqlite connection.
        """
        self.cursor.close()
        self.conn.close()

    def plotstuff(self, T=[0, 1000]):
        '''
        create a scatter plot of the contents of the database,
        with entries on the interval T

        Parameters:
        ::

            T : list,
                time interval
        '''
        fig = plt.figure(figsize=(10,10))
        ax = fig.add_subplot(111)
        neurons = self.neurons()
        i = 0
        for x in self.select_neurons_interval(neurons, T):
            ax.plot(x, np.zeros(x.size) + neurons[i], 'o',
                markersize=1, markerfacecolor='k', markeredgecolor='k',
                alpha=0.25)
            i += 1
        ax.set_xlabel('time (ms)')
        ax.set_ylabel('neuron ID')
        ax.set_xlim(T[0], T[1])
        ax.set_ylim(neurons.min(), neurons.max())
        ax.set_title('database content on T = [%.0f, %.0f]' % (T[0], T[1]))
def test1():
    """Smoke test: build a db from gdf files in the cwd and query it."""
    # need have a bunch of gdf files in current directory
    # delete old db
    os.system('rm test.db')
    # create db from excitatory files
    gdb = GDF('test.db', debug=True)
    gdb.create(re='brunel-py-ex-*.gdf', index=True)
    # get spikes for neurons 1,2,3
    spikes = gdb.select([1,2,3])
    # wont get any spikes for these neurons
    # cause they dont exist
    bad = gdb.select([100000,100001])
    gdb.close()
    print spikes
    print bad


if __name__ == '__main__':
    test1()
|
espenhgn/ViSAPy
|
ViSAPy/gdf.py
|
Python
|
gpl-2.0
| 8,440
|
[
"NEURON"
] |
b22a844f41d03d6a3eff3e4b9f7e3f208fdb4d3066529d78d4ac0f3c713be3e2
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
    '''
    Look up *colorName* in VTK's named-color table and return its
    red, green and blue components as doubles.
    '''
    components = [0.0, 0.0, 0.0]  # defaults to black
    vtk.vtkNamedColors().GetColorRGB(colorName, components)
    return components
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# create pipeline for ren1
#
# Second reader: supplies the scalar field (function 153) that will be
# probed onto the isosurface extracted from the first reader's output.
pl3d2 = vtk.vtkMultiBlockPLOT3DReader()
pl3d2.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d2.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d2.SetScalarFunctionNumber(153)
pl3d2.Update()
output2 = pl3d2.GetOutput().GetBlock(0)
# First reader: scalar function 120 + vector function 202, used for the
# isosurface and the outline.
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(120)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
iso = vtk.vtkContourFilter()
iso.SetInputData(output)
iso.SetValue(0, -100000)
# Probe the second dataset's scalars onto the isosurface geometry.
probe2 = vtk.vtkProbeFilter()
probe2.SetInputConnection(iso.GetOutputPort())
probe2.SetSourceData(output2)
cast2 = vtk.vtkCastToConcrete()
cast2.SetInputConnection(probe2.GetOutputPort())
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(cast2.GetOutputPort())
normals.SetFeatureAngle(45)
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInputConnection(normals.GetOutputPort())
isoMapper.ScalarVisibilityOn()
isoMapper.SetScalarRange(output2.GetPointData().GetScalars().GetRange())
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetColor(GetRGBColor('bisque'))
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)

# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(isoActor)
ren1.SetBackground(1, 1, 1)
ren1.SetViewport(0, 0, .5, 1)
renWin.SetSize(512, 256)
ren1.SetBackground(0.1, 0.2, 0.4)
ren1.ResetCamera()
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(9.71821, 0.458166, 29.3999)
cam1.SetPosition(2.7439, -37.3196, 38.7167)
cam1.SetViewUp(-0.16123, 0.264271, 0.950876)
# ren2 shows a plane textured with a capture of ren1 (see below).
aPlane = vtk.vtkPlaneSource()
aPlaneMapper = vtk.vtkPolyDataMapper()
aPlaneMapper.SetInputConnection(aPlane.GetOutputPort())
aPlaneMapper.ImmediateModeRenderingOn()
screen = vtk.vtkActor()
screen.SetMapper(aPlaneMapper)
ren2.AddActor(screen)
ren2.SetViewport(.5, 0, 1, 1)
ren2.GetActiveCamera().Azimuth(30)
ren2.GetActiveCamera().Elevation(30)
ren2.SetBackground(.8, .4, .3)
ren1.ResetCameraClippingRange()
ren2.ResetCamera()
ren2.ResetCameraClippingRange()
renWin.Render()
# Capture ren1's frame (including depth) and use it as a texture on the
# plane shown in ren2.
ren1Image = vtk.vtkRendererSource()
ren1Image.SetInput(ren1)
ren1Image.DepthValuesOn()
aTexture = vtk.vtkTexture()
aTexture.SetInputConnection(ren1Image.GetOutputPort())
screen.SetTexture(aTexture)
# renWin.Render()
# render the image
#
renWin.Render()
#iren.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Rendering/Core/Testing/Python/rendererSource.py
|
Python
|
bsd-3-clause
| 3,438
|
[
"VTK"
] |
097d6a01ba731e3f465ceaa84569315cc97a9cd90499b485bf526c5eb9245640
|
# Simple python code for automatically bonusing workers on mTurk.
# Written by Desmond Ong (github.com/desmond-ong), 15 July 2013. Send comments to dco@stanford.edu.
# version 1.2, updated Jul 22, 2015
# Instructions:
# 1) replace "filename" with the name of the input file,
# 2) write the bonus message to participants,
# 3) fill in the location where CLT is installed.
# Then
# 4) run "python bonusScript.py"
# 4b) Export your javahome if necessary
# 5) run "sh " filename "-bonusBashScript.sh"
# 6) check filename "-bonusResults" for any errors.
#
# Format for filename: a csv file with:
# AssignmentID (*not* HIT ID) in the first column,
# workerID in the second column,
# and bonus amount in the third column (in dollars, no dollar sign).
# E.g.
#
# AssignmentID WorkerID Bonus
# 2XXX11X1X1X1XXXXX1XXXXX1XXXXXX X12XXXZXXXX73X 0.5
# 2XXX1XXXXX1XXXXXX1X1XXXXXXXXXX X13XXX4X5XXX6X 0.27
#
#
# You may also need to export your javahome. First run:
# /usr/libexec/java_home
# then run, replacing the path on the RHS of the = with the output from the above command
# export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Home
#
# if you want to set the JAVA_HOME in your profile so that
# it types that command for you automatically every time you start, edit:
# ~/.bash_profile
# using your favorite editor (e.g. vim), and type that export line.
# e.g. vim ~/.bash_profile
# "i" for insert, type the export line, hit escape, then type :wq, and <return> to exit.
#
#
#
#
# --------------
# Change Log
# v1.3, Aug 4, 2016
# - fixed typo in calculation of new AMT commission structure
# v1.2, Jul 22, 2015
# - updated calculation to incorporate new AMT commission structure
#
# v1.1
# - Made unique filenames (i.e. file will be filename + "-bonusScript.sh")
# - Added summary stats after creating the bash script:
# - how many people with how much bonus
# - Added logging to an output file.
#
# v1.0
# - basic functionality: took in a file name, wrote out a bunch of bonus commands to a bash script file
# --- Configuration ----------------------------------------------------------
filename = "exampleBonusFile.csv"
bonusMessage = "Bonus for doing my HIT :)"
locationofCLT = "/XXX/aws-mturk-clt-1.3.1"

import csv
import os
from decimal import Decimal

currentWD = os.getcwd()  # current working directory, to place the results file
rowNum = 0
bonusTotal = Decimal(0)
bonusPeople = 0
bonusScripts = ""

# Output files are named after the input file, minus its .csv extension.
if filename.endswith('.csv'):
    outputFilename = filename[:-4]
else:
    outputFilename = filename

# Read the bonus CSV: AssignmentID, WorkerID, Bonus (dollars) per row.
with open(filename, 'rU') as f:
    reader = csv.reader(f)
    for row in reader:
        if rowNum > 0:  # skip the first row header
            if Decimal(row[2]) > 0:  # if bonus greater than 0
                bonusPeople += 1
                bonusTotal += Decimal(row[2])
                bonusScripts = bonusScripts + "./grantBonus.sh -workerid " + row[1] + " -amount " + row[2] + " -assignment " + row[0] + " -reason " + "\"" + bonusMessage + "\" >> '" + currentWD + "/" + outputFilename + "-bonusResults' \n"
                bonusScripts = bonusScripts + "echo ' --done bonusing participant number " + str(bonusPeople) + "' \n"
        rowNum += 1
bonusScripts = bonusScripts + "echo 'Remember to check " + outputFilename + "-bonusResults for any errors!' \n"

# AMT commission estimate (flat 20%). Construct Decimal from a *string*:
# Decimal(1.20) would carry the binary floating-point representation error
# of 1.2 into the exact-decimal computation.
commission = 20
bonusTotalWithCommission = round((bonusTotal * Decimal('1.20')) * 100, 1) / 100

summaryMessage = "\n--- Done! Wrote a script for " + str(bonusPeople) + " participants with a total bonus amount of $" + str(bonusTotal) + " (excluding AMT commission).\n"
summaryMessage = summaryMessage + "With a " + str(commission) + "% commission, the total cost is probably $" + str(bonusTotalWithCommission) + "\n"
summaryMessage = summaryMessage + "Run: sh " + outputFilename + "-bonusBashScript.sh (Be sure to have your JAVA_HOME set!)" + "\n"
summaryMessage = summaryMessage + "After running the script, console output will be copied to " + outputFilename + "-bonusResults" + "\n"
print(summaryMessage)

# Write the bash script that runs the grantBonus commands from the CLT's
# bin directory; the context manager guarantees the file is flushed/closed.
outputFilename = outputFilename + "-bonusBashScript.sh"
with open(outputFilename, 'w') as bonusBashScript:
    bonusBashScript.write("#!/usr/bin/env sh\npushd " + locationofCLT + "/bin\n" + bonusScripts + "popd")
|
desmond-ong/amtBonusScript
|
bonusScript.py
|
Python
|
mit
| 4,369
|
[
"Desmond"
] |
be1255ea4c324fca21bec40ae14d8c8b4c62eeb86fa4e1d76bc9c1fb20f7ad48
|
"""cpassdb - Config"""
__author__ = "Brian Wiborg <baccenfutter@c-base.org>"
__license__ = "GNU/GPLv2"
import os
# Root directory of the password store (expanded per-user).
STORAGE_ROOT = os.path.expanduser('~/.cpassdb')
# Sub-directory name (relative to STORAGE_ROOT) holding stored objects.
OBJECTS_DIR = 'objects'
# Timestamp pattern — looks like an arrow/moment-style format token string,
# not strftime; NOTE(review): confirm against the consuming code.
TIMESTAMP_FORMAT = 'YYYYMMDDHHmmss'
# Default time-to-live; units are not evident here — verify at usage site.
DEFAULT_TTL = 30
# Seconds of protocol inactivity before a session is considered idle.
PROTOCOL_IDLE_TIMEOUT_SECONDS = 15
# Name of the group granted administrative rights.
ADMIN_GROUP = 'wheels'
# GnuPG home directory and configuration file name.
GNUPG_PATH = os.path.expanduser('~/.gnupg')
GNUPG_CONF = 'gpg.conf'
|
baccenfutter/cpassdb
|
cpassdb/config.py
|
Python
|
gpl-2.0
| 368
|
[
"Brian"
] |
419a194e24019b68da040685c8e10d9e4d90d76f7849647f7a7b5aeece6b89c3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package implements modules for input and output to and from CREST
"""
|
gmatteo/pymatgen
|
pymatgen/io/xtb/__init__.py
|
Python
|
mit
| 190
|
[
"pymatgen"
] |
90c8aee366ecce18ef4a0c161562c1ea9f24063b5056960ec932d57137ae14e6
|
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import datetime
import clawpack.visclaw.colormaps as colormap
import clawpack.visclaw.gaugetools as gaugetools
import clawpack.clawutil.data as clawdata
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data as geodata
import clawpack.geoclaw.topotools as topotools
import clawpack.geoclaw.util as geoutil
import clawpack.geoclaw.surge.plot as surgeplot
# to compare actual gauge data plot:
import csv
from clawpack.geoclaw.util import fetch_noaa_tide_data
# Optional fixed-grid plotting hook; fall back to None when the module is
# absent. Catch ImportError specifically instead of a bare except, so real
# errors inside an existing setplotfg module are not silently hidden.
try:
    from setplotfg import setplotfg
except ImportError:
    setplotfg = None
def days2seconds(days):
    """Convert a duration in days to seconds."""
    hours = days * 24.0
    return hours * 60.0 * 60.0
def setplot(plotdata):
r"""Setplot function for surge plotting"""
plotdata.clearfigures() # clear any old figures,axes,items data
plotdata.format = 'ascii'
# Load data from output
claw_data = clawdata.ClawInputData(2)
claw_data.read(os.path.join(plotdata.outdir, 'claw.data'))
physics = geodata.GeoClawData()
physics.read(os.path.join(plotdata.outdir, 'geoclaw.data'))
surge_data = geodata.SurgeData()
surge_data.read(os.path.join(plotdata.outdir, 'surge.data'))
friction_data = geodata.FrictionData()
friction_data.read(os.path.join(plotdata.outdir, 'friction.data'))
# Load storm track
track = surgeplot.track_data(os.path.join(plotdata.outdir,'fort.track'))
# Set afteraxes function
surge_afteraxes = lambda cd: surgeplot.surge_afteraxes(cd, track,
plot_direction=False)
def plot_coastline(cd):
"""Load fine coastline for plotting around NYC"""
try:
# Assumes that at path theres a fine topography file in NetCDF file format
path = "/Users/mandli/Dropbox/research/data/topography/atlantic/sandy_bathy/ny_area.nc"
topo_file = topotools.Topography(path, topo_type=4)
topo_file.read(nc_params={"x_var":"lon",
"y_var":"lat",
"z_var": "Band1"})
axes = plt.gca()
axes.contour(topo_file.X, topo_file.Y, topo_file.Z,
levels=[-0.001, 0.001],
colors='k', linestyles='-')
except:
pass
surge_afteraxes(cd)
# Color limits
surface_range = 4.5
speed_range = 1.0
# speed_range = 1.e-2
eta = physics.sea_level
if not isinstance(eta,list):
eta = [eta]
surface_limits = [eta[0]-surface_range,eta[0]+surface_range]
speed_limits = [0.0,speed_range]
wind_limits = [0, 55]
pressure_limits = [966, 1013]
friction_bounds = [0.01, 0.04]
vorticity_limits = [-1.e-2, 1.e-2]
land_bounds = [-10, 50]
# ==========================================================================
# Plot specifications
# ==========================================================================
# Limits for plots
regions = {'Full Domain': {"xlimits": [claw_data.lower[0], claw_data.upper[0]],
"ylimits": [claw_data.lower[1], claw_data.upper[1]],
"shrink": 1.0,
"figsize": [6.4, 4.8]},
'Tri-State Region': {"xlimits": [-74.5,-71.0],
"ylimits": [40.0,41.5],
"shrink": 1.0,
"figsize": [6.4, 4.8]},
'NYC': {"xlimits": [-74.2,-73.7],
"ylimits": [40.4,40.85],
"shrink": 1.0,
"figsize": [6.4, 4.8]}
}
def gauge_location_afteraxes(cd):
    """Afteraxes hook: run the standard surge afteraxes and overlay every
    gauge location (black circles, 'ko') with labels on the current frame."""
    plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
    surge_afteraxes(cd)
    gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all',
                                    format_string='ko', add_labels=True)
for (name, region_dict) in regions.items():
# ========================================================================
# Surface Elevations
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Surface - %s' % name)
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.xlimits = region_dict['xlimits']
plotaxes.ylimits = region_dict['ylimits']
plotaxes.afteraxes = plot_coastline
# plotaxes.afteraxes = surge_afteraxes
# plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits,
shrink=region_dict['shrink'])
surgeplot.add_land(plotaxes, bounds=land_bounds)
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0,0,0]
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0,0,0]
# ========================================================================
# Water Speed
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Currents - %s' % name)
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Currents'
plotaxes.scaled = True
plotaxes.xlimits = region_dict['xlimits']
plotaxes.ylimits = region_dict['ylimits']
plotaxes.afteraxes = plot_coastline
surgeplot.add_speed(plotaxes, bounds=speed_limits,
shrink=region_dict['shrink'])
surgeplot.add_land(plotaxes, bounds=land_bounds)
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0,0,0]
plotaxes.plotitem_dict['speed'].amr_patchedges_show = [0,0,0]
# ========================================================================
# Hurricane forcing - Entire Atlantic
# ========================================================================
# Friction field
plotfigure = plotdata.new_plotfigure(name='Friction')
plotfigure.show = False
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Full Domain']['xlimits']
plotaxes.ylimits = regions['Full Domain']['ylimits']
plotaxes.title = "Manning's N Coefficients"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_friction(plotaxes,bounds=friction_bounds)
# Pressure field
plotfigure = plotdata.new_plotfigure(name='Pressure')
plotfigure.show = True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Full Domain']['xlimits']
plotaxes.ylimits = regions['Full Domain']['ylimits']
plotaxes.title = "Pressure Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_pressure(plotaxes,bounds=pressure_limits)
surgeplot.add_land(plotaxes, bounds=[-10, 500])
# Wind field
plotfigure = plotdata.new_plotfigure(name='Wind Speed')
plotfigure.show = True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Full Domain']['xlimits']
plotaxes.ylimits = regions['Full Domain']['ylimits']
plotaxes.title = "Wind Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_wind(plotaxes,bounds=wind_limits,plot_type='imshow')
surgeplot.add_land(plotaxes, bounds=[-10, 500])
# ========================================================================
# Figures for gauges
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \
type='each_gauge')
plotfigure.show = True
plotfigure.clf_each_gauge = True
stations = [('8518750', 'The Battery, NY'),
('8516945', 'Kings Point, NY'),
('8519483', 'Bergen Point West Reach, NY')]
#('8531680','Sandy Hook, NY'),
#('n03020','Narrows,NY')]
landfall_time = np.datetime64('2012-10-29T23:30')
begin_date = datetime.datetime(2012, 10, 28)
end_date = datetime.datetime(2012, 10, 31,)
def get_actual_water_levels(station_id):
    """Fetch observed NOAA water levels for *station_id*, de-tided and
    expressed in seconds relative to landfall.

    Uses the module-level ``begin_date``, ``end_date`` and
    ``landfall_time``.  Returns ``(seconds_rel_landfall, water_level)``.
    """
    # Fetch water levels and tide predictions for given station
    date_time, water_level, tide = fetch_noaa_tide_data(station_id,
                                                        begin_date, end_date)
    # Calculate times relative to landfall
    seconds_rel_landfall = (date_time - landfall_time) / np.timedelta64(1, 's')
    # Subtract tide predictions from measured water levels
    water_level -= tide
    return seconds_rel_landfall, water_level
def gauge_afteraxes(cd):
    """Afteraxes hook for gauge figures: overlay observed NOAA water
    levels (green curve) and tidy up titles, labels and axis limits.

    Assumes gauge numbers 1..len(stations) map one-to-one onto the
    module-level ``stations`` list -- TODO confirm against the gauge
    numbering used in setrun.
    """
    station_id, station_name = stations[cd.gaugeno-1]
    seconds_rel_landfall, actual_level = get_actual_water_levels(station_id)
    axes = plt.gca()
    #surgeplot.plot_landfall_gauge(cd.gaugesoln, axes, landfall=landfall)
    axes.plot(seconds_rel_landfall, actual_level, 'g')
    # Fix up plot - in particular fix time labels
    axes.set_title(station_name)
    axes.set_xlabel('Seconds relative to landfall')
    axes.set_ylabel('Surface (m)')
    axes.set_xlim([days2seconds(-2), days2seconds(1)])
    axes.set_ylim([0, 4])
    axes.set_xticks([ days2seconds(-2), days2seconds(-1), 0, days2seconds(1)])
    #axes.set_xticklabels([r"$-3$", r"$-2$", r"$-1$", r"$0$", r"$1$"])
    #axes.grid(True)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.afteraxes = gauge_afteraxes
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
|
mandli/surge-examples
|
sandy/setplot.py
|
Python
|
mit
| 10,948
|
[
"NetCDF"
] |
de21a4d632b3704ff42734ba3f168236b666a5f92c664d5a96e933bb84cdbcbe
|
"""
General API for testing dataset objects
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "GPL"
import unittest
import tempfile
import os
import shutil
import numpy as np
import deepchem as dc
def load_solubility_data():
  """Load the example solubility regression dataset.

  Featurizes the SMILES column of ``models/tests/example.csv`` with
  1024-bit circular fingerprints for the single "log-solubility"
  regression task.  Returns the featurized dataset from CSVLoader.
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  featurizer = dc.feat.CircularFingerprint(size=1024)
  tasks = ["log-solubility"]
  # (Removed the unused local ``task_type``; the loader infers nothing
  # from it and it was never read.)
  input_file = os.path.join(current_dir, "../../models/tests/example.csv")
  loader = dc.data.CSVLoader(
      tasks=tasks, smiles_field="smiles", featurizer=featurizer)
  return loader.featurize(input_file)
def load_multitask_data():
  """Load the 17-task example multitask dataset (task0 .. task16).

  Featurizes ``models/tests/multitask_example.csv`` with 1024-bit
  circular fingerprints and returns the featurized dataset.
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  featurizer = dc.feat.CircularFingerprint(size=1024)
  tasks = ["task%d" % index for index in range(17)]
  input_file = os.path.join(
      current_dir, "../../models/tests/multitask_example.csv")
  loader = dc.data.CSVLoader(
      tasks=tasks, smiles_field="smiles", featurizer=featurizer)
  return loader.featurize(input_file)
def load_classification_data():
  """Load the example classification dataset.

  Featurizes the SMILES column of
  ``models/tests/example_classification.csv`` with 1024-bit circular
  fingerprints for the single "outcome" task.
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  featurizer = dc.feat.CircularFingerprint(size=1024)
  tasks = ["outcome"]
  # (Removed the unused local ``task_type``.)
  input_file = os.path.join(
      current_dir, "../../models/tests/example_classification.csv")
  loader = dc.data.CSVLoader(
      tasks=tasks, smiles_field="smiles", featurizer=featurizer)
  return loader.featurize(input_file)
def load_sparse_multitask_dataset():
  """Load the sparse tox multitask sample dataset (task1 .. task9).

  Featurizes ``models/tests/sparse_multitask_example.csv`` with 1024-bit
  circular fingerprints and returns the featurized dataset.
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  featurizer = dc.feat.CircularFingerprint(size=1024)
  tasks = ["task%d" % index for index in range(1, 10)]
  input_file = os.path.join(
      current_dir, "../../models/tests/sparse_multitask_example.csv")
  loader = dc.data.CSVLoader(
      tasks=tasks, smiles_field="smiles", featurizer=featurizer)
  return loader.featurize(input_file)
def load_feat_multitask_data():
  """Load the example dataset with user-defined numerical features.

  Uses six "featN" columns as features and six "taskN" columns as tasks
  from ``models/tests/feat_multitask_example.csv``.
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  features = ["feat%d" % index for index in range(6)]
  featurizer = dc.feat.UserDefinedFeaturizer(features)
  tasks = ["task%d" % index for index in range(6)]
  input_file = os.path.join(
      current_dir, "../../models/tests/feat_multitask_example.csv")
  loader = dc.data.UserCSVLoader(
      tasks=tasks, featurizer=featurizer, id_field="id")
  return loader.featurize(input_file)
def load_gaussian_cdf_data():
  """Load the example dataset of standard-normal samples.

  Each feature and task column of ``models/tests/gaussian_cdf_example.csv``
  holds values drawn from a normal distribution with mean 0 and stdev 1.
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  features = ["feat%d" % index for index in range(2)]
  featurizer = dc.feat.UserDefinedFeaturizer(features)
  tasks = ["task%d" % index for index in range(2)]
  input_file = os.path.join(
      current_dir, "../../models/tests/gaussian_cdf_example.csv")
  loader = dc.data.UserCSVLoader(
      tasks=tasks, featurizer=featurizer, id_field="id")
  return loader.featurize(input_file)
|
bowenliu16/deepchem
|
deepchem/data/tests/__init__.py
|
Python
|
gpl-3.0
| 3,608
|
[
"Gaussian"
] |
074a646eaae53197be216c18015c71fa3707bbafc969cf1e5eee83dbe3885ec5
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import unittest
import warnings
import numpy as np
from monty.json import MontyDecoder
from pymatgen.analysis.xas.spectrum import XAS, site_weighted_spectrum
from pymatgen.core import Element
from pymatgen.util.testing import PymatgenTest
# Directory holding the serialized reference spectra used by the tests.
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "spectrum_test")
# Deserialize the JSON fixtures once at import time; MontyDecoder
# reconstructs the pymatgen objects embedded in each dict.
with open(os.path.join(test_dir, "LiCoO2_k_xanes.json")) as fp:
    k_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "LiCoO2_k_exafs.json")) as fp:
    k_exafs_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "ZnO_l2_xanes.json")) as fp:
    l2_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "ZnO_l3_xanes.json")) as fp:
    l3_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "site1_k_xanes.json")) as fp:
    site1_xanes_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, "site2_k_xanes.json")) as fp:
    site2_xanes_dict = json.load(fp, cls=MontyDecoder)
class XASTest(PymatgenTest):
    """Tests for the XAS spectrum class, ``XAS.stitch`` and
    ``site_weighted_spectrum``."""

    def setUp(self):
        # Rebuild the spectra from the JSON fixtures before every test,
        # since several tests mutate the XAS objects in place.
        self.k_xanes = XAS.from_dict(k_xanes_dict)
        self.k_exafs = XAS.from_dict(k_exafs_dict)
        self.l2_xanes = XAS.from_dict(l2_xanes_dict)
        self.l3_xanes = XAS.from_dict(l3_xanes_dict)
        self.site1_xanes = XAS.from_dict(site1_xanes_dict)
        self.site2_xanes = XAS.from_dict(site2_xanes_dict)

    def test_e0(self):
        """Edge energy is read back from the fixture."""
        self.assertAlmostEqual(7728.565, self.k_xanes.e0)

    def test_k(self):
        """Wavenumber array aligns with energies; k == 0 at the edge."""
        self.assertEqual(len(self.k_xanes.x), len(self.k_xanes.k))
        self.assertAlmostEqual(self.k_xanes.e0, self.k_xanes.x[self.k_xanes.k.index(0)])

    def test_normalization(self):
        """Sum-normalization makes intensities sum to one."""
        self.k_xanes.normalize(mode="sum")
        self.assertAlmostEqual(1.0, np.sum(self.k_xanes.y))

    def test_add_mul(self):
        """Addition and scalar multiplication scale intensities linearly."""
        scaled_spect = self.k_xanes + self.k_xanes
        scaled_spect2 = self.k_xanes * 3
        self.assertTrue(np.allclose(scaled_spect.y, 2 * self.k_xanes.y))
        self.assertTrue(np.allclose(scaled_spect2.y, 3 * self.k_xanes.y))
        self.assertAlmostEqual(0.274302, self.k_xanes.get_interpolated_value(7720.422), 3)

    def test_to_from_dict(self):
        """Round-trip through as_dict/from_dict preserves intensities."""
        s = XAS.from_dict(self.k_xanes.as_dict())
        self.assertArrayAlmostEqual(s.y, self.k_xanes.y)

    def test_attributes(self):
        """energy/intensity are aliases for x/y."""
        self.assertArrayEqual(self.k_xanes.energy, self.k_xanes.x)
        self.assertArrayEqual(self.k_xanes.intensity, self.k_xanes.y)

    def test_str(self):
        self.assertIsNotNone(str(self.k_xanes))

    def test_validate(self):
        """All-zero intensities are rejected by the constructor."""
        y_zeros = np.zeros(len(self.k_xanes.x))
        self.assertRaises(
            ValueError,
            XAS,
            self.k_xanes.x,
            y_zeros,
            self.k_xanes.structure,
            self.k_xanes.absorbing_element,
        )

    def test_stitch_xafs(self):
        """XAFS stitching: valid mode, 500-point result, edge preserved,
        and mismatched spectra rejected."""
        self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.k_exafs, mode="invalid")
        xafs = XAS.stitch(self.k_xanes, self.k_exafs, mode="XAFS")
        self.assertIsInstance(xafs, XAS)
        self.assertEqual("XAFS", xafs.spectrum_type)
        self.assertEqual(len(xafs.x), 500)
        self.assertAlmostEqual(min(xafs.x), min(self.k_xanes.x), 2)
        self.assertAlmostEqual(max(xafs.y), max(self.k_xanes.y), 2)
        # The steepest gradient of the stitched spectrum should sit at e0.
        self.assertAlmostEqual(
            xafs.x[np.argmax(np.gradient(xafs.y) / np.gradient(xafs.x))],
            self.k_xanes.e0,
            2,
        )
        self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.l2_xanes, mode="XAFS")
        self.k_xanes.x = np.zeros(100)
        self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.k_exafs)
        self.k_xanes.absorbing_element = Element("Pt")
        self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.k_exafs, mode="XAFS")

    def test_stitch_l23(self):
        """L2/L3 stitching: warning on intensity jump, merged edge label,
        and invalid inputs rejected."""
        self.l2_xanes.y[0] = 0.1
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            XAS.stitch(self.l2_xanes, self.l3_xanes, 100, mode="L23")
            self.assertEqual(len(w), 6)
            self.assertIs(w[-1].category, UserWarning)
            self.assertIn("jump", str(w[-1].message))
        self.l2_xanes = XAS.from_dict(l2_xanes_dict)
        l23 = XAS.stitch(self.l2_xanes, self.l3_xanes, 100, mode="L23")
        self.assertIsInstance(l23, XAS)
        self.assertEqual("L23", l23.edge)
        self.assertAlmostEqual(min(l23.x), min(self.l3_xanes.x), 3)
        self.assertAlmostEqual(max(l23.x), max(self.l3_xanes.x), 3)
        self.assertTrue(np.greater_equal(l23.y, self.l2_xanes.y).all())
        self.assertEqual(len(l23.x), 100)
        self.l2_xanes.spectrum_type = "EXAFS"
        self.assertRaises(ValueError, XAS.stitch, self.l2_xanes, self.l3_xanes, mode="L23")
        self.l2_xanes.absorbing_element = Element("Pt")
        self.assertRaises(ValueError, XAS.stitch, self.l2_xanes, self.l3_xanes, mode="L23")
        self.assertRaises(ValueError, XAS.stitch, self.k_xanes, self.l3_xanes, mode="L23")

    def test_site_weighted_spectrum(self):
        weighted_spectrum = site_weighted_spectrum([self.site1_xanes, self.site2_xanes])
        self.assertIsInstance(weighted_spectrum, XAS)
        # BUG FIX: was ``assertTrue(len(...), 500)`` -- the 500 was only the
        # failure *message*, so the assertion passed for any nonzero length.
        self.assertEqual(len(weighted_spectrum.x), 500)
        # The site multiplicities for site1 and site2 are 4 and 2, respectively.
        self.assertAlmostEqual(
            weighted_spectrum.y[0],
            (4 * self.site1_xanes.y[0] + 2 * self.site2_xanes.y[0]) / 6,
            2,
        )
        self.assertEqual(
            min(weighted_spectrum.x),
            max(min(self.site1_xanes.x), min(self.site2_xanes.x)),
        )
        self.site2_xanes.absorbing_index = self.site1_xanes.absorbing_index
        self.assertRaises(ValueError, site_weighted_spectrum, [self.site1_xanes, self.site2_xanes])
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/analysis/xas/tests/test_spectrum.py
|
Python
|
mit
| 5,982
|
[
"pymatgen"
] |
5ba724b14273bb266c98a82e1429fde795035efbb2317c448a330cd54d44eb4a
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
# Path prefix for locating psi4/driver sources; an optional single CLI
# argument overrides the default (current-directory-relative) prefix.
DriverPath = ''
InsertPath = '/../../../'
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
    # Make the build directory importable when run from out-of-tree.
    sys.path.insert(0, os.path.abspath(os.getcwd()))
def pts(category, pyfile):
    """Announce on stdout that *pyfile* in *category* is being auto-documented."""
    print('Auto-documenting {0} file {1}'.format(category, pyfile))
# Main driver modules in psi4/driver
# Generate source/autodoc_driver.rst: an automodule entry per driver
# module, with per-module member exclusions for names documented elsewhere.
fdriver = open('source/autodoc_driver.rst', 'w')
fdriver.write('\n.. include:: /autodoc_abbr_options_c.rst\n\n')
fdriver.write('.. _`sec:driver`:\n\n')
fdriver.write('=============\n')
fdriver.write('Python Driver\n')
fdriver.write('=============\n\n')
for pyfile in glob.glob(DriverPath + '../../psi4/driver/*.py'):
    filename = os.path.split(pyfile)[1]
    basename = os.path.splitext(filename)[0]
    div = '=' * len(basename)
    # Skip vendored/utility modules that should not appear in the docs.
    if basename not in ['inpsight', 'pep8', 'diatomic_fits', 'pyparsing', 'computation_cache']:
        pts('driver', basename)
        fdriver.write(basename + '\n')
        fdriver.write(div + '\n\n')
        fdriver.write('.. automodule:: %s\n' % (basename))
        fdriver.write(' :members:\n')
        fdriver.write(' :undoc-members:\n')
        # Exclude members that have dedicated hand-written documentation.
        if basename == 'driver':
            fdriver.write(' :exclude-members: energy, optimize, opt, frequency, frequencies, freq, property, prop, molden, gdma, fchk, gradient, hessian\n')
        elif basename == 'wrapper_database':
            fdriver.write(' :exclude-members: db, database\n')
        elif basename == 'driver_nbody':
            fdriver.write(' :exclude-members: nbody_gufunc\n')
        elif basename == 'driver_cbs':
            fdriver.write(' :exclude-members: cbs, complete_basis_set, xtpl_highest_1,\n')
            fdriver.write(' scf_xtpl_helgaker_3, scf_xtpl_helgaker_2, corl_xtpl_helgaker_2, n_body\n')
        # elif basename == 'physconst':
        #     fdriver.write('\n.. literalinclude:: %sdriver/%s\n' % (IncludePath, filename))
        elif basename == 'diatomic':
            fdriver.write(' :exclude-members: anharmonicity\n')
        # elif basename == 'interface_dftd3':
        #     fdriver.write(' :exclude-members: run_dftd3\n')
        # elif basename == 'interface_cfour':
        #     fdriver.write(' :exclude-members: run_cfour\n')
        elif basename == 'aliases':
            fdriver.write(' :exclude-members: sherrill_gold_standard, allen_focal_point\n')
        elif basename == 'p4util':
            fdriver.write(' :exclude-members: oeprop, cubeprop\n')
        elif basename == 'procedures':
            fdriver.write(' :exclude-members: interface_cfour\n')
        fdriver.write('\n')
# Python-only plugin modules in psi4/driver
# Document each plugin package (subdirectory) and its submodules.
# BUG FIX: ``os.walk(...).next()`` was Python-2-only; generators have no
# ``.next()`` method on Python 3 (AttributeError).  Use builtin next().
for basename in next(os.walk(DriverPath + '../../psi4/driver'))[1]:
    div = '=' * len(basename)
    if basename not in ['grendel']:
        pts('driver', basename)
        fdriver.write(basename + '\n')
        fdriver.write(div + '\n\n')
        fdriver.write('.. automodule:: %s\n' % (basename))
        fdriver.write(' :members:\n')
        fdriver.write(' :undoc-members:\n')
        for pyfile in glob.glob(DriverPath + '../../psi4/driver/' + basename + '/*py'):
            filename = os.path.split(pyfile)[1]
            basename2 = os.path.splitext(filename)[0]
            div = '=' * len(basename2)
            fdriver.write('.. automodule:: %s.%s\n' % (basename, basename2))
            fdriver.write(' :members:\n')
            fdriver.write(' :undoc-members:\n')
            if basename == 'qcdb' and basename2 == 'interface_dftd3':
                fdriver.write(' :exclude-members: run_dftd3\n')
            fdriver.write('\n')
        fdriver.write('\n')
fdriver.close()
|
andysim/psi4
|
doc/sphinxman/document_driver.py
|
Python
|
gpl-2.0
| 4,584
|
[
"Psi4"
] |
fe1a256f01030cac87038bea694704e7a0bf9ebb831e1cb241d3703f1dd34511
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START composer_gkeoperator]
from airflow import models
from airflow.providers.google.cloud.operators.kubernetes_engine import (
GKECreateClusterOperator,
GKEDeleteClusterOperator,
GKEStartPodOperator,
)
from airflow.utils.dates import days_ago
# Sample DAG: create a GKE cluster, run pods on it with increasingly
# detailed GKEStartPodOperator configurations, then delete the cluster.
with models.DAG(
    "example_gcp_gke",
    schedule_interval=None,  # Override to match your needs
    start_date=days_ago(1),
    tags=["example"],
) as dag:
    # [START composer_gke_create_cluster]
    # [START composer_gkeoperator_minconfig]
    # [START composer_gkeoperator_templateconfig]
    # [START composer_gkeoperator_affinity]
    # [START composer_gkeoperator_fullconfig]
    # TODO(developer): update with your values
    PROJECT_ID = "my-project-id"
    CLUSTER_ZONE = "us-west1-a"
    CLUSTER_NAME = "example-cluster"
    # [END composer_gkeoperator_minconfig]
    # [END composer_gkeoperator_templateconfig]
    # [END composer_gkeoperator_affinity]
    # [END composer_gkeoperator_fullconfig]
    # Cluster body passed to the GKE API: two single-node node pools
    # (pool-1 is targeted by the affinity example below).
    CLUSTER = {
        "name": CLUSTER_NAME,
        "node_pools": [
            {"name": "pool-0", "initial_node_count": 1},
            {"name": "pool-1", "initial_node_count": 1},
        ],
    }
    # [END composer_gke_create_cluster]
    # [START composer_gke_create_cluster]
    create_cluster = GKECreateClusterOperator(
        task_id="create_cluster",
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
        body=CLUSTER,
    )
    # [END composer_gke_create_cluster]
    # [START composer_gkeoperator_minconfig]
    kubernetes_min_pod = GKEStartPodOperator(
        # The ID specified for the task.
        task_id="pod-ex-minimum",
        # Name of task you want to run, used to generate Pod ID.
        name="pod-ex-minimum",
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
        cluster_name=CLUSTER_NAME,
        # Entrypoint of the container, if not specified the Docker container's
        # entrypoint is used. The cmds parameter is templated.
        cmds=["echo"],
        # The namespace to run within Kubernetes, default namespace is
        # `default`.
        namespace="default",
        # Docker image specified. Defaults to hub.docker.com, but any fully
        # qualified URLs will point to a custom repository. Supports private
        # gcr.io images if the Composer Environment is under the same
        # project-id as the gcr.io images and the service account that Composer
        # uses has permission to access the Google Container Registry
        # (the default service account has permission)
        image="gcr.io/gcp-runtimes/ubuntu_18_0_4",
    )
    # [END composer_gkeoperator_minconfig]
    # [START composer_gkeoperator_templateconfig]
    # NOTE(review): "kubenetes_template_ex" is a typo for "kubernetes...";
    # kept as-is since the name is referenced in the dependency chain below.
    kubenetes_template_ex = GKEStartPodOperator(
        task_id="ex-kube-templates",
        name="ex-kube-templates",
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
        cluster_name=CLUSTER_NAME,
        namespace="default",
        image="bash",
        # All parameters below are able to be templated with jinja -- cmds,
        # arguments, env_vars, and config_file. For more information visit:
        # https://airflow.apache.org/docs/apache-airflow/stable/macros-ref.html
        # Entrypoint of the container, if not specified the Docker container's
        # entrypoint is used. The cmds parameter is templated.
        cmds=["echo"],
        # DS in jinja is the execution date as YYYY-MM-DD, this docker image
        # will echo the execution date. Arguments to the entrypoint. The docker
        # image's CMD is used if this is not provided. The arguments parameter
        # is templated.
        arguments=["{{ ds }}"],
        # The var template variable allows you to access variables defined in
        # Airflow UI. In this case we are getting the value of my_value and
        # setting the environment variable `MY_VALUE`. The pod will fail if
        # `my_value` is not set in the Airflow UI.
        env_vars={"MY_VALUE": "{{ var.value.my_value }}"},
    )
    # [END composer_gkeoperator_templateconfig]
    # [START composer_gkeoperator_affinity]
    kubernetes_affinity_ex = GKEStartPodOperator(
        task_id="ex-pod-affinity",
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
        cluster_name=CLUSTER_NAME,
        name="ex-pod-affinity",
        namespace="default",
        image="perl",
        cmds=["perl"],
        arguments=["-Mbignum=bpi", "-wle", "print bpi(2000)"],
        # affinity allows you to constrain which nodes your pod is eligible to
        # be scheduled on, based on labels on the node. In this case, if the
        # label 'cloud.google.com/gke-nodepool' with value
        # 'nodepool-label-value' or 'nodepool-label-value2' is not found on any
        # nodes, it will fail to schedule.
        affinity={
            "nodeAffinity": {
                # requiredDuringSchedulingIgnoredDuringExecution means in order
                # for a pod to be scheduled on a node, the node must have the
                # specified labels. However, if labels on a node change at
                # runtime such that the affinity rules on a pod are no longer
                # met, the pod will still continue to run on the node.
                "requiredDuringSchedulingIgnoredDuringExecution": {
                    "nodeSelectorTerms": [
                        {
                            "matchExpressions": [
                                {
                                    # When nodepools are created in Google Kubernetes
                                    # Engine, the nodes inside of that nodepool are
                                    # automatically assigned the label
                                    # 'cloud.google.com/gke-nodepool' with the value of
                                    # the nodepool's name.
                                    "key": "cloud.google.com/gke-nodepool",
                                    "operator": "In",
                                    # The label key's value that pods can be scheduled
                                    # on.
                                    "values": [
                                        "pool-1",
                                    ],
                                }
                            ]
                        }
                    ]
                }
            }
        },
    )
    # [END composer_gkeoperator_affinity]
    # [START composer_gkeoperator_fullconfig]
    kubernetes_full_pod = GKEStartPodOperator(
        task_id="ex-all-configs",
        name="full",
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
        cluster_name=CLUSTER_NAME,
        namespace="default",
        image="perl",
        # Entrypoint of the container, if not specified the Docker container's
        # entrypoint is used. The cmds parameter is templated.
        cmds=["perl"],
        # Arguments to the entrypoint. The docker image's CMD is used if this
        # is not provided. The arguments parameter is templated.
        arguments=["-Mbignum=bpi", "-wle", "print bpi(2000)"],
        # The secrets to pass to Pod, the Pod will fail to create if the
        # secrets you specify in a Secret object do not exist in Kubernetes.
        secrets=[],
        # Labels to apply to the Pod.
        labels={"pod-label": "label-name"},
        # Timeout to start up the Pod, default is 120.
        startup_timeout_seconds=120,
        # The environment variables to be initialized in the container
        # env_vars are templated.
        env_vars={"EXAMPLE_VAR": "/example/value"},
        # If true, logs stdout output of container. Defaults to True.
        get_logs=True,
        # Determines when to pull a fresh image, if 'IfNotPresent' will cause
        # the Kubelet to skip pulling an image if it already exists. If you
        # want to always pull a new image, set it to 'Always'.
        image_pull_policy="Always",
        # Annotations are non-identifying metadata you can attach to the Pod.
        # Can be a large range of data, and can include characters that are not
        # permitted by labels.
        annotations={"key1": "value1"},
        # Resource specifications for Pod, this will allow you to set both cpu
        # and memory limits and requirements.
        # Prior to Airflow 1.10.4, resource specifications were
        # passed as a Pod Resources Class object,
        # If using this example on a version of Airflow prior to 1.10.4,
        # import the "pod" package from airflow.contrib.kubernetes and use
        # resources = pod.Resources() instead passing a dict
        # For more info see:
        # https://github.com/apache/airflow/pull/4551
        resources={"limit_memory": "250M", "limit_cpu": "100m"},
        # If true, the content of /airflow/xcom/return.json from container will
        # also be pushed to an XCom when the container ends.
        do_xcom_push=False,
        # List of Volume objects to pass to the Pod.
        volumes=[],
        # List of VolumeMount objects to pass to the Pod.
        volume_mounts=[],
        # Affinity determines which nodes the Pod can run on based on the
        # config. For more information see:
        # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
        affinity={},
    )
    # [END composer_gkeoperator_fullconfig]
    # [START composer_gkeoperator_delete_cluster]
    delete_cluster = GKEDeleteClusterOperator(
        task_id="delete_cluster",
        name=CLUSTER_NAME,
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
    )
    # [END composer_gkeoperator_delete_cluster]
    # Task ordering: each pod task runs after cluster creation and before
    # cluster deletion.
    create_cluster >> kubernetes_min_pod >> delete_cluster
    create_cluster >> kubernetes_full_pod >> delete_cluster
    create_cluster >> kubernetes_affinity_ex >> delete_cluster
    create_cluster >> kubenetes_template_ex >> delete_cluster
    # [END composer_gkeoperator]
|
GoogleCloudPlatform/python-docs-samples
|
composer/workflows/gke_operator.py
|
Python
|
apache-2.0
| 10,478
|
[
"VisIt"
] |
bfa8f09ffdadf61a046ff812b90bc180e2ae92657dbc085c85ea1b7dcb5579a7
|
import os
from boxbranding import getMachineBrand, getMachineName
import xml.etree.cElementTree
from datetime import datetime
from time import ctime, time, strftime, localtime, mktime
from bisect import insort
from enigma import eActionMap, quitMainloop
from Components.config import config
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
from Tools import Directories, Notifications
from Tools.XMLTools import stringToXML
import timer
import NavigationInstance
#global variables begin
DSsave = False
RSsave = False
RBsave = False
aeDSsave = False
wasTimerWakeup = False
#+++
debug = False
#+++
#global variables end
#----------------------------------------------------------------------------------------------------
#Timer shutdown, reboot and restart priority
#1. wakeup
#2. wakeuptostandby -> (same as 1.)
#3. deepstandby -> DSsave
#4. deppstandby after event -> aeDSsave
#5. reboot system -> RBsave
#6. restart gui -> RSsave
#7. standby
#8. autostandby
#9. nothing (no function, only for suppress autodeepstandby timer)
#10. autodeepstandby
#-overlapping timers or next timer start is within 15 minutes, will only the high-order timer executed (at same types will executed the next timer)
#-autodeepstandby timer is only effective if no other timer is active or current time is in the time window
#-priority for repeated timer: shift from begin and end time only temporary, end-action priority is higher as the begin-action
#----------------------------------------------------------------------------------------------------
#reset wakeup state after ending timer
def resetTimerWakeup():
	"""Clear the timer-wakeup marker file and reset the module-level
	``wasTimerWakeup`` flag after a timer has finished."""
	global wasTimerWakeup
	if os.path.exists("/tmp/was_powertimer_wakeup"):
		os.remove("/tmp/was_powertimer_wakeup")
		if debug: print "[POWERTIMER] reset wakeup state"
	wasTimerWakeup = False
# Parse an event and return a (begin, end) pair of timestamps,
# where end = begin + duration.
def parseEvent(ev):
	"""Return the (begin, end) timestamps of *ev*, with end derived from
	the event's duration."""
	start = ev.getBeginTime()
	return start, start + ev.getDuration()
class AFTEREVENT:
	"""Enumeration of actions performed after a power timer event ends."""
	NONE = 0
	WAKEUP = 1
	WAKEUPTOSTANDBY = 2
	STANDBY = 3
	DEEPSTANDBY = 4

	def __init__(self):
		pass
class TIMERTYPE:
	"""Enumeration of power timer types (see the priority notes above)."""
	NONE = 0
	WAKEUP = 1
	WAKEUPTOSTANDBY = 2
	AUTOSTANDBY = 3
	AUTODEEPSTANDBY = 4
	STANDBY = 5
	DEEPSTANDBY = 6
	REBOOT = 7
	RESTART = 8

	def __init__(self):
		pass
# please do not translate log messages
class PowerTimerEntry(timer.TimerEntry, object):
	def __init__(self, begin, end, disabled = False, afterEvent = AFTEREVENT.NONE, timerType = TIMERTYPE.WAKEUP, checkOldTimers = False, autosleepdelay = 60):
		"""Create a power timer entry spanning ``begin``..``end`` (unix
		timestamps).

		``checkOldTimers`` pulls begin times older than two weeks up to
		"now"; auto(-deep)standby timers in the future are also started
		immediately.
		"""
		timer.TimerEntry.__init__(self, int(begin), int(end))
		if checkOldTimers:
			# 1209600 s = 14 days: discard stale begin times.
			if self.begin < time() - 1209600:
				self.begin = int(time())
		if self.end < self.begin:
			self.end = self.begin
		self.dontSave = False
		self.disabled = disabled
		self.timer = None
		self.__record_service = None
		self.start_prepare = 0
		self.timerType = timerType
		self.afterEvent = afterEvent
		self.autoincrease = False
		self.autoincreasetime = 3600 * 24 # 1 day
		# Defaults for the auto(-deep)standby options; 'yes'/'no' strings
		# mirror the config element values used elsewhere in the codebase.
		self.autosleepinstandbyonly = 'no'
		self.autosleepdelay = autosleepdelay
		self.autosleeprepeat = 'once'
		self.autosleepwindow = 'no'
		self.autosleepbegin = self.begin
		self.autosleepend = self.end
		# Network-activity guards for auto deepstandby.
		self.nettraffic = 'no'
		self.trafficlimit = 100
		self.netip = 'no'
		self.ipadress = "0.0.0.0"
		self.log_entries = []
		self.resetState()
		#check autopowertimer
		if (self.timerType == TIMERTYPE.AUTOSTANDBY or self.timerType == TIMERTYPE.AUTODEEPSTANDBY) and not self.disabled and time() > 3600 and self.begin > time():
			self.begin = int(time()) #the begin is in the future -> set to current time = no start delay of this timer
def __repr__(self):
timertype = {
TIMERTYPE.NONE: "nothing",
TIMERTYPE.WAKEUP: "wakeup",
TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
TIMERTYPE.AUTOSTANDBY: "autostandby",
TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
TIMERTYPE.STANDBY: "standby",
TIMERTYPE.DEEPSTANDBY: "deepstandby",
TIMERTYPE.REBOOT: "reboot",
TIMERTYPE.RESTART: "restart"
}[self.timerType]
if not self.disabled:
return "PowerTimerEntry(type=%s, begin=%s)" % (timertype, ctime(self.begin))
else:
return "PowerTimerEntry(type=%s, begin=%s Disabled)" % (timertype, ctime(self.begin))
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
	def do_backoff(self):
		"""Set the retry delay: 300 s steps, capped at 900 s.

		NOTE operator precedence on the first condition: it evaluates as
		(inStandby and not wasTimerWakeup) or RSsave or RBsave or aeDSsave
		or DSsave — any pending shifted timer forces the minimum backoff.
		"""
		if Screens.Standby.inStandby and not wasTimerWakeup or RSsave or RBsave or aeDSsave or DSsave:
			self.backoff = 300
		else:
			if self.backoff == 0:
				self.backoff = 300
			else:
				self.backoff += 300
				if self.backoff > 900:
					self.backoff = 900
		self.log(10, "backoff: retry in %d minutes" % (int(self.backoff)/60))
	def activate(self):
		"""Advance this timer through its state machine (Prepared -> Running -> Ended).

		Returns True when the transition to the next state is accepted; False
		keeps the current state so the timer core retries later with the
		shifted begin/end times.  Relies on the module globals RSsave/RBsave/
		DSsave/aeDSsave (shift bookkeeping for restart/reboot/deepstandby
		timers) and wasTimerWakeup.
		"""
		global RSsave, RBsave, DSsave, aeDSsave, wasTimerWakeup
		isRecTimerWakeup = breakPT = shiftPT = False
		now = time()
		next_state = self.state + 1
		self.log(5, "activating state %d" % next_state)
		# auto-sleep timers: (re)anchor the sleep window to today and set the first deadline
		if next_state == self.StatePrepared and (self.timerType == TIMERTYPE.AUTOSTANDBY or self.timerType == TIMERTYPE.AUTODEEPSTANDBY):
			eActionMap.getInstance().bindAction('', -0x7FFFFFFF, self.keyPressed)
			if self.autosleepwindow == 'yes':
				ltm = localtime(now)
				asb = strftime("%H:%M", localtime(self.autosleepbegin)).split(':')
				ase = strftime("%H:%M", localtime(self.autosleepend)).split(':')
				self.autosleepbegin = int(mktime(datetime(ltm.tm_year, ltm.tm_mon, ltm.tm_mday, int(asb[0]), int(asb[1])).timetuple()))
				self.autosleepend = int(mktime(datetime(ltm.tm_year, ltm.tm_mon, ltm.tm_mday, int(ase[0]), int(ase[1])).timetuple()))
				if self.autosleepend <= self.autosleepbegin:
					self.autosleepbegin -= 86400
			if self.getAutoSleepWindow():
				if now < self.autosleepbegin and now > self.autosleepbegin - self.prepare_time - 3: #begin is in prepare time window
					self.begin = self.end = self.autosleepbegin + int(self.autosleepdelay)*60
				else:
					self.begin = self.end = int(now) + int(self.autosleepdelay)*60
			else:
				return False
			if self.timerType == TIMERTYPE.AUTODEEPSTANDBY:
				self.getNetworkTraffic(getInitialValue = True)
		if (next_state == self.StateRunning or next_state == self.StateEnded) and NavigationInstance.instance.PowerTimer is None:
			#TODO: running/ended timer at system start has no nav instance
			#First fix: crash in getPriorityCheck (NavigationInstance.instance.PowerTimer...)
			#Second fix: suppress the message (A finished powertimer wants to ...)
			if debug: print "*****NavigationInstance.instance.PowerTimer is None*****", self.timerType, self.state, ctime(self.begin), ctime(self.end)
			return True
		elif next_state == self.StateRunning and abs(self.begin - now) > 900: return True
		elif next_state == self.StateEnded and abs(self.end - now) > 900: return True
		if next_state == self.StateRunning or next_state == self.StateEnded:
			if NavigationInstance.instance.isRecordTimerImageStandard:
				isRecTimerWakeup = NavigationInstance.instance.RecordTimer.isRecTimerWakeup()
			if isRecTimerWakeup:
				wasTimerWakeup = True
			elif os.path.exists("/tmp/was_powertimer_wakeup") and not wasTimerWakeup:
				wasTimerWakeup = int(open("/tmp/was_powertimer_wakeup", "r").read()) and True or False
		if next_state == self.StatePrepared:
			self.log(6, "prepare ok, waiting for begin: %s" % ctime(self.begin))
			self.backoff = 0
			return True
		elif next_state == self.StateRunning:
			# if this timer has been cancelled, just go to "end" state.
			if self.cancelled:
				return True
			if self.failed:
				return True
			if self.timerType == TIMERTYPE.NONE:
				return True
			elif self.timerType == TIMERTYPE.WAKEUP:
				if debug: print "self.timerType == TIMERTYPE.WAKEUP:"
				if Screens.Standby.inStandby:
					Screens.Standby.inStandby.Power()
				return True
			#elif self.timerType == TIMERTYPE.WAKEUPTOSTANDBY:
			#	return True
			elif self.timerType == TIMERTYPE.WAKEUPTOSTANDBY or self.timerType == TIMERTYPE.STANDBY:
				# ask the user (or go straight) to standby unless a higher-priority timer is pending
				if debug:
					if self.timerType == TIMERTYPE.WAKEUPTOSTANDBY:
						print "self.timerType == TIMERTYPE.WAKEUPTOSTANDBY:"
					else:
						print "self.timerType == TIMERTYPE.STANDBY:"
				prioPT = [TIMERTYPE.WAKEUP,TIMERTYPE.RESTART,TIMERTYPE.REBOOT,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.WAKEUP,AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				if not Screens.Standby.inStandby and not breakPT: # not already in standby
					Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout = 180)
				return True
			elif self.timerType == TIMERTYPE.AUTOSTANDBY:
				if debug: print "self.timerType == TIMERTYPE.AUTOSTANDBY:"
				if not self.getAutoSleepWindow():
					return False
				if not Screens.Standby.inStandby: # not already in standby
					Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout = 180)
					if self.autosleeprepeat == "once":
						eActionMap.getInstance().unbindAction('', self.keyPressed)
						return True
					else:
						self.begin = self.end = int(now) + int(self.autosleepdelay)*60
				else:
					self.begin = self.end = int(now) + int(self.autosleepdelay)*60
			elif self.timerType == TIMERTYPE.AUTODEEPSTANDBY:
				if debug: print "self.timerType == TIMERTYPE.AUTODEEPSTANDBY:"
				if not self.getAutoSleepWindow():
					return False
				# postpone while anything is recording / about to record / network is active
				if isRecTimerWakeup or (self.autosleepinstandbyonly == 'yes' and not Screens.Standby.inStandby) \
					or NavigationInstance.instance.PowerTimer.isProcessing() or abs(NavigationInstance.instance.PowerTimer.getNextPowerManagerTime() - now) <= 900 or self.getNetworkAdress() or self.getNetworkTraffic() \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					self.do_backoff()
					# retry
					self.begin = self.end = int(now) + self.backoff
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.autosleeprepeat == "once":
						self.disabled = True
					if Screens.Standby.inStandby or self.autosleepinstandbyonly == 'noquery': # in standby or option 'without query' is enabled
						print "[PowerTimer] quitMainloop #1"
						quitMainloop(1)
						return True
					else:
						Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout = 180)
						if self.autosleeprepeat == "once":
							eActionMap.getInstance().unbindAction('', self.keyPressed)
							return True
						else:
							self.begin = self.end = int(now) + int(self.autosleepdelay)*60
			elif self.timerType == TIMERTYPE.RESTART:
				if debug: print "self.timerType == TIMERTYPE.RESTART:"
				#check priority
				prioPT = [TIMERTYPE.RESTART,TIMERTYPE.REBOOT,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if RBsave or aeDSsave or DSsave:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values from other timer
				if False:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				if isRecTimerWakeup or shiftPT or breakPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not RSsave:
						self.savebegin = self.begin
						self.saveend = self.end
						RSsave = True
					if not breakPT:
						self.do_backoff()
						#check difference begin to end before shift begin time
						if RSsave and self.end - self.begin > 3 and self.end - now - self.backoff <= 240: breakPT = True
					#breakPT
					if breakPT:
						if self.repeated and RSsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						RSsave = False
						return True
					# retry
					oldbegin = self.begin
					self.begin = int(now) + self.backoff
					if abs(self.end - oldbegin) <= 3:
						self.end = self.begin
					else:
						if not self.repeated and self.end < self.begin + 300:
							self.end = self.begin + 300
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and RSsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #4"
						quitMainloop(3)
					else:
						Notifications.AddNotificationWithCallback(self.sendTryToRestartNotification, MessageBox, _("A finished powertimer wants to restart the user interface.\nDo that now?"), timeout = 180)
				RSsave = False
				return True
			elif self.timerType == TIMERTYPE.REBOOT:
				if debug: print "self.timerType == TIMERTYPE.REBOOT:"
				#check priority
				prioPT = [TIMERTYPE.REBOOT,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if aeDSsave or DSsave:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values from other timer
				if RSsave:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				if isRecTimerWakeup or shiftPT or breakPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not RBsave:
						self.savebegin = self.begin
						self.saveend = self.end
						RBsave = True
					if not breakPT:
						self.do_backoff()
						#check difference begin to end before shift begin time
						if RBsave and self.end - self.begin > 3 and self.end - now - self.backoff <= 240: breakPT = True
					#breakPT
					if breakPT:
						if self.repeated and RBsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						RBsave = False
						return True
					# retry
					oldbegin = self.begin
					self.begin = int(now) + self.backoff
					if abs(self.end - oldbegin) <= 3:
						self.end = self.begin
					else:
						if not self.repeated and self.end < self.begin + 300:
							self.end = self.begin + 300
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and RBsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #3"
						quitMainloop(2)
					else:
						Notifications.AddNotificationWithCallback(self.sendTryToRebootNotification, MessageBox, _("A finished powertimer wants to reboot your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout = 180)
				RBsave = False
				return True
			elif self.timerType == TIMERTYPE.DEEPSTANDBY:
				if debug: print "self.timerType == TIMERTYPE.DEEPSTANDBY:"
				#check priority
				prioPT = [TIMERTYPE.WAKEUP,TIMERTYPE.WAKEUPTOSTANDBY,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.WAKEUP,AFTEREVENT.WAKEUPTOSTANDBY,AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if False:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values from other timer
				if RSsave or RBsave or aeDSsave:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				if isRecTimerWakeup or shiftPT or breakPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not DSsave:
						self.savebegin = self.begin
						self.saveend = self.end
						DSsave = True
					if not breakPT:
						self.do_backoff()
						#check difference begin to end before shift begin time
						if DSsave and self.end - self.begin > 3 and self.end - now - self.backoff <= 240: breakPT = True
					#breakPT
					if breakPT:
						if self.repeated and DSsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						DSsave = False
						return True
					# retry
					oldbegin = self.begin
					self.begin = int(now) + self.backoff
					if abs(self.end - oldbegin) <= 3:
						self.end = self.begin
					else:
						if not self.repeated and self.end < self.begin + 300:
							self.end = self.begin + 300
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and DSsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #2"
						quitMainloop(1)
					else:
						Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout = 180)
				DSsave = False
				return True
		elif next_state == self.StateEnded:
			# after-event handling once the timer span has elapsed
			if self.afterEvent == AFTEREVENT.WAKEUP:
				if Screens.Standby.inStandby:
					Screens.Standby.inStandby.Power()
			elif self.afterEvent == AFTEREVENT.STANDBY:
				if not Screens.Standby.inStandby: # not already in standby
					Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished powertimer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout = 180)
			elif self.afterEvent == AFTEREVENT.DEEPSTANDBY:
				if debug: print "self.afterEvent == AFTEREVENT.DEEPSTANDBY:"
				#check priority
				prioPT = [TIMERTYPE.WAKEUP,TIMERTYPE.WAKEUPTOSTANDBY,TIMERTYPE.DEEPSTANDBY]
				prioPTae = [AFTEREVENT.WAKEUP,AFTEREVENT.WAKEUPTOSTANDBY,AFTEREVENT.DEEPSTANDBY]
				shiftPT,breakPT = self.getPriorityCheck(prioPT,prioPTae)
				#a timer with higher priority was shifted - no execution of current timer
				if DSsave:
					if debug: print "break#1"
					breakPT = True
				#a timer with lower priority was shifted - shift now current timer and wait for restore the saved time values
				if RSsave or RBsave:
					if debug: print "shift#1"
					breakPT = False
					shiftPT = True
				#shift or break
				runningPT = False
				#option: check other powertimer is running (current disabled)
				#runningPT = NavigationInstance.instance.PowerTimer.isProcessing(exceptTimer = TIMERTYPE.NONE, endedTimer = self.timerType)
				if isRecTimerWakeup or shiftPT or breakPT or runningPT \
					or NavigationInstance.instance.RecordTimer.isRecording() or abs(NavigationInstance.instance.RecordTimer.getNextRecordingTime() - now) <= 900 or abs(NavigationInstance.instance.RecordTimer.getNextZapTime() - now) <= 900:
					if self.repeated and not aeDSsave:
						self.savebegin = self.begin
						self.saveend = self.end
						aeDSsave = True
					if not breakPT: self.do_backoff()
					#breakPT
					if breakPT:
						if self.repeated and aeDSsave:
							try:
								self.begin = self.savebegin
								self.end = self.saveend
							except:
								pass
						aeDSsave = False
						return True
					# retry
					self.end = int(now) + self.backoff
					return False
				elif not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
					if self.repeated and aeDSsave:
						try:
							self.begin = self.savebegin
							self.end = self.saveend
						except:
							pass
					if Screens.Standby.inStandby: # in standby
						print "[PowerTimer] quitMainloop #5"
						quitMainloop(1)
					else:
						Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished powertimer wants to shutdown your %s %s.\nDo that now?") % (getMachineBrand(), getMachineName()), timeout = 180)
				aeDSsave = False
			NavigationInstance.instance.PowerTimer.saveTimer()
			resetTimerWakeup()
			return True
	def setAutoincreaseEnd(self, entry = None):
		"""Try to extend this timer's end time (autoincrease feature).

		Without an entry the end grows by autoincreasetime; with an entry it
		grows up to 30 s before that entry's begin.  A sanity check against
		the power manager's timer list may clamp the new end further.
		Returns True if the end time was changed.
		"""
		if not self.autoincrease:
			return False
		if entry is None:
			new_end = int(time()) + self.autoincreasetime
		else:
			new_end = entry.begin - 30
		# simulate the change with a disabled dummy entry before committing
		dummyentry = PowerTimerEntry(self.begin, new_end, disabled=True, afterEvent = self.afterEvent, timerType = self.timerType)
		dummyentry.disabled = self.disabled
		timersanitycheck = TimerSanityCheck(NavigationInstance.instance.PowerManager.timer_list, dummyentry)
		if not timersanitycheck.check():
			simulTimerList = timersanitycheck.getSimulTimerList()
			if simulTimerList is not None and len(simulTimerList) > 1:
				new_end = simulTimerList[1].begin
				new_end -= 30 # leave 30 seconds of prepare time
		if new_end <= time():
			return False
		self.end = new_end
		return True
def sendStandbyNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def sendTryToRebootNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 2)
def sendTryToRestartNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 3)
def keyPressed(self, key, tag):
if self.getAutoSleepWindow():
self.begin = self.end = int(time()) + int(self.autosleepdelay)*60
	def getAutoSleepWindow(self):
		"""Check/maintain the configured auto-sleep time window.

		Rolls the window forward day by day when it lies in the past and
		re-anchors begin/end on it.  Returns False (and resets this timer's
		state) when 'now' is outside the window, True otherwise or when no
		window is configured.
		"""
		now = time()
		if self.autosleepwindow == 'yes':
			if now < self.autosleepbegin and now < self.autosleepend:
				self.begin = self.autosleepbegin
				self.end = self.autosleepend
			elif now > self.autosleepbegin and now > self.autosleepend:
				# window fully in the past: advance in whole days until it spans 'now'
				while self.autosleepend < now:
					self.autosleepend += 86400
				while self.autosleepbegin + 86400 < self.autosleepend:
					self.autosleepbegin += 86400
				self.begin = self.autosleepbegin
				self.end = self.autosleepend
			if not (now > self.autosleepbegin - self.prepare_time - 3 and now < self.autosleepend):
				# outside the window: stop listening for keys and re-arm the timer
				eActionMap.getInstance().unbindAction('', self.keyPressed)
				self.state = 0
				self.timeChanged()
				return False
		return True
	def getPriorityCheck(self,prioPT,prioPTae):
		"""Compare this timer against other power timers due within 15 minutes.

		prioPT / prioPTae list the timer types / after-events that outrank
		this timer.  Returns (shiftPT, breakPT): shiftPT means this timer
		should be postponed, breakPT means it must not run at all.
		"""
		shiftPT = breakPT = False
		nextPTlist = NavigationInstance.instance.PowerTimer.getNextPowerManagerTime(getNextTimerTyp = True)
		for entry in nextPTlist:
			#check timers within next 15 mins will started or ended
			if abs(entry[0] - time()) > 900:
				continue
			#faketime
			if entry[1] is None and entry[2] is None and entry[3] is None:
				if debug: print "shift#2 - entry is faketime", ctime(entry[0]), entry
				shiftPT = True
				continue
			#is timer in list itself?
			if entry[0] == self.begin and entry[1] == self.timerType and entry[2] is None and entry[3] == self.state \
				or entry[0] == self.end and entry[1] is None and entry[2] == self.afterEvent and entry[3] == self.state:
				if debug: print "entry is itself", ctime(entry[0]), entry
				nextPTitself = True
			else:
				nextPTitself = False
			if (entry[1] in prioPT or entry[2] in prioPTae) and not nextPTitself:
				if debug: print "break#2 <= 900", ctime(entry[0]), entry
				breakPT = True
				break
		return shiftPT, breakPT
	def getNextActivation(self):
		"""Return the epoch time at which the next state transition is due."""
		if self.state == self.StateEnded or self.state == self.StateFailed:
			return self.end
		next_state = self.state + 1
		# map the upcoming state to its trigger time
		return {self.StatePrepared: self.start_prepare,
				self.StateRunning: self.begin,
				self.StateEnded: self.end }[next_state]
	def getNextWakeup(self, getNextStbPowerOn = False):
		"""Return the next wake-up relevant time for this timer.

		With getNextStbPowerOn the result is the next time the box must be
		powered on (wakeup-type timers / wakeup after-events); for repeated
		timers the next matching weekday is computed from the 'repeated'
		bitmask.  Returns -1 when this timer needs no wakeup.
		"""
		next_state = self.state + 1
		if getNextStbPowerOn:
			if next_state == 3 and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY or self.afterEvent == AFTEREVENT.WAKEUP or self.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY):
				if self.start_prepare > time() and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY): #timer start time is later as now - begin time was changed while running timer
					return self.start_prepare
				elif self.begin > time() and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY): #timer start time is later as now - begin time was changed while running timer
					return self.begin
				if self.afterEvent == AFTEREVENT.WAKEUP or self.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY:
					return self.end
				next_day = 0
				count_day = 0
				# scan the repeat bitmask (as a binary string, indexed from the
				# end) for the next enabled weekday after this timer's weekday
				wd_timer = datetime.fromtimestamp(self.begin).isoweekday()*-1
				wd_repeated = bin(128+self.repeated)
				for s in range(wd_timer-1,-8,-1):
					count_day +=1
					if int(wd_repeated[s]):
						next_day = s
						break
				if next_day == 0:
					# wrap around to the beginning of the week
					for s in range(-1,wd_timer-1,-1):
						count_day +=1
						if int(wd_repeated[s]):
							next_day = s
							break
				#return self.begin + 86400 * count_day
				return self.start_prepare + 86400 * count_day
			elif next_state == 2 and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY):
				return self.begin
			elif next_state == 1 and (self.timerType == TIMERTYPE.WAKEUP or self.timerType == TIMERTYPE.WAKEUPTOSTANDBY):
				return self.start_prepare
			elif next_state < 3 and (self.afterEvent == AFTEREVENT.WAKEUP or self.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY):
				return self.end
			else:
				return -1
		if self.state == self.StateEnded or self.state == self.StateFailed:
			return self.end
		return {self.StatePrepared: self.start_prepare,
				self.StateRunning: self.begin,
				self.StateEnded: self.end}[next_state]
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) > 60 and int(old_prepare) != int(self.start_prepare):
self.log(15, "time changed, start prepare is now: %s" % ctime(self.start_prepare))
def getNetworkAdress(self):
ret = False
if self.netip == 'yes':
try:
for ip in self.ipadress.split(','):
if not os.system("ping -q -w1 -c1 " + ip):
ret = True
break
except:
print '[PowerTimer] Error reading ip! -> %s' % self.ipadress
return ret
	def getNetworkTraffic(self, getInitialValue = False):
		"""Check eth*/wlan* traffic from /proc/net/dev against trafficlimit.

		With getInitialValue the current byte counters are only stored as the
		baseline.  Otherwise returns True when the average rate since the
		last call exceeds trafficlimit (kbit/s) or a counter overflowed,
		False otherwise or when the 'nettraffic' option is disabled.
		"""
		now = time()
		newbytes = 0
		if self.nettraffic == 'yes':
			try:
				if os.path.exists('/proc/net/dev'):
					f = open('/proc/net/dev', 'r')
					temp = f.readlines()
					f.close()
					for lines in temp:
						lisp = lines.split()
						# columns 1 and 9 are received / transmitted bytes
						if lisp[0].endswith(':') and (lisp[0].startswith('eth') or lisp[0].startswith('wlan')):
							newbytes += long(lisp[1]) + long(lisp[9])
					if getInitialValue:
						self.netbytes = newbytes
						self.netbytes_time = now
						print '[PowerTimer] Receive/Transmit initialBytes=%d, time is %s' % (self.netbytes, ctime(self.netbytes_time))
						return
					oldbytes = self.netbytes
					seconds = int(now-self.netbytes_time)
					self.netbytes = newbytes
					self.netbytes_time = now
					diffbytes = float(newbytes - oldbytes) * 8 / 1024 / seconds #in kbit/s
					if diffbytes < 0:
						print '[PowerTimer] Receive/Transmit -> overflow interface counter, waiting for next value'
						return True
					else:
						print '[PowerTimer] Receive/Transmit kilobits per second: %0.2f (%0.2f MByte in %d seconds), actualBytes=%d, time is %s' % (diffbytes, diffbytes/8/1024*seconds, seconds, self.netbytes, ctime(self.netbytes_time))
					if diffbytes > self.trafficlimit:
						return True
			except:
				print '[PowerTimer] Receive/Transmit Bytes: Error reading values! Use "cat /proc/net/dev" for testing on command line.'
		return False
def createTimer(xml):
	"""Build a PowerTimerEntry from a <timer> XML element of pm_timers.xml.

	Missing optional attributes fall back to safe defaults; <log> children
	are restored into entry.log_entries.  Raises KeyError on unknown
	timertype/afterevent values and ValueError on non-numeric begin/end.
	"""
	timertype = str(xml.get("timertype") or "wakeup")
	timertype = {
		"nothing": TIMERTYPE.NONE,
		"wakeup": TIMERTYPE.WAKEUP,
		"wakeuptostandby": TIMERTYPE.WAKEUPTOSTANDBY,
		"autostandby": TIMERTYPE.AUTOSTANDBY,
		"autodeepstandby": TIMERTYPE.AUTODEEPSTANDBY,
		"standby": TIMERTYPE.STANDBY,
		"deepstandby": TIMERTYPE.DEEPSTANDBY,
		"reboot": TIMERTYPE.REBOOT,
		"restart": TIMERTYPE.RESTART
	}[timertype]
	begin = int(xml.get("begin"))
	end = int(xml.get("end"))
	# tolerate a missing "repeated" attribute (previously raised AttributeError
	# on None); default to a non-repeating timer
	repeated = (xml.get("repeated") or "0").encode("utf-8")
	# int() suffices here: Py2 ints auto-promote, and long() no longer exists in Py3
	disabled = int(xml.get("disabled") or "0")
	afterevent = str(xml.get("afterevent") or "nothing")
	afterevent = {
		"nothing": AFTEREVENT.NONE,
		"wakeup": AFTEREVENT.WAKEUP,
		"wakeuptostandby": AFTEREVENT.WAKEUPTOSTANDBY,
		"standby": AFTEREVENT.STANDBY,
		"deepstandby": AFTEREVENT.DEEPSTANDBY
	}[afterevent]
	autosleepinstandbyonly = str(xml.get("autosleepinstandbyonly") or "no")
	autosleepdelay = str(xml.get("autosleepdelay") or "0")
	autosleeprepeat = str(xml.get("autosleeprepeat") or "once")
	autosleepwindow = str(xml.get("autosleepwindow") or "no")
	autosleepbegin = int(xml.get("autosleepbegin") or begin)
	autosleepend = int(xml.get("autosleepend") or end)
	nettraffic = str(xml.get("nettraffic") or "no")
	trafficlimit = int(xml.get("trafficlimit") or 100)
	netip = str(xml.get("netip") or "no")
	ipadress = str(xml.get("ipadress") or "0.0.0.0")
	entry = PowerTimerEntry(begin, end, disabled, afterevent, timertype)
	entry.repeated = int(repeated)
	entry.autosleepinstandbyonly = autosleepinstandbyonly
	entry.autosleepdelay = int(autosleepdelay)
	entry.autosleeprepeat = autosleeprepeat
	entry.autosleepwindow = autosleepwindow
	entry.autosleepbegin = autosleepbegin
	entry.autosleepend = autosleepend
	entry.nettraffic = nettraffic
	entry.trafficlimit = trafficlimit
	entry.netip = netip
	entry.ipadress = ipadress
	# restore persisted log entries
	for l in xml.findall("log"):
		ltime = int(l.get("time"))
		code = int(l.get("code"))
		msg = l.text.strip().encode("utf-8")
		entry.log_entries.append((ltime, code, msg))
	return entry
class PowerTimer(timer.Timer):
	def __init__(self):
		"""Timer collection persisted to <config dir>/pm_timers.xml."""
		timer.Timer.__init__(self)
		self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "pm_timers.xml")
		try:
			self.loadTimer()
		except IOError:
			print "unable to load timers from file!"
	def doActivate(self, w):
		"""Drive entry w one step through its state machine and re-sort it.

		Called by the timer core when w's next activation time is reached.
		"""
		# when activating a timer which has already passed,
		# simply abort the timer. don't run trough all the stages.
		if w.shouldSkip():
			w.state = PowerTimerEntry.StateEnded
		else:
			# when active returns true, this means "accepted".
			# otherwise, the current state is kept.
			# the timer entry itself will fix up the delay then.
			if w.activate():
				w.state += 1
		try:
			self.timer_list.remove(w)
		except:
			print '[PowerManager]: Remove list failed'
		# did this timer reached the last state?
		if w.state < PowerTimerEntry.StateEnded:
			# no, sort it into active list
			insort(self.timer_list, w)
		else:
			# yes. Process repeated, and re-add.
			if w.repeated:
				w.processRepeated()
				w.state = PowerTimerEntry.StateWaiting
				self.addTimerEntry(w)
			else:
				# Remove old timers as set in config
				self.cleanupDaily(config.recording.keep_timers.value)
				insort(self.processed_timers, w)
		self.stateChanged(w)
	def loadTimer(self):
		"""Load timers from pm_timers.xml; rename the file aside if corrupt.

		Pops up an error message on parse failure or when overlapping
		timers are detected.
		"""
		# TODO: PATH!
		if not Directories.fileExists(self.Filename):
			return
		try:
			file = open(self.Filename, 'r')
			doc = xml.etree.cElementTree.parse(file)
			file.close()
		except SyntaxError:
			from Tools.Notifications import AddPopup
			from Screens.MessageBox import MessageBox
			AddPopup(_("The timer file (pm_timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
			print "pm_timers.xml failed to load!"
			try:
				import os
				# keep the broken file for inspection instead of deleting it
				os.rename(self.Filename, self.Filename + "_old")
			except (IOError, OSError):
				print "renaming broken timer failed"
			return
		except IOError:
			print "pm_timers.xml not found!"
			return
		root = doc.getroot()
		# put out a message when at least one timer overlaps
		checkit = True
		# NOTE: the loop variable shadows the imported 'timer' module within this method
		for timer in root.findall("timer"):
			newTimer = createTimer(timer)
			if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
				from Tools.Notifications import AddPopup
				from Screens.MessageBox import MessageBox
				AddPopup(_("Timer overlap in pm_timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
				checkit = False # at moment it is enough when the message is displayed one time
	def saveTimer(self):
		"""Serialise all timers (active + processed) to pm_timers.xml.

		Writes to Filename + ".writing" first and renames atomically after
		an fsync, so a crash mid-write cannot corrupt the existing file.
		"""
		savedays = 3600 * 24 * 7 # log entries older than 7 days are dropped on save
		list = ['<?xml version="1.0" ?>\n', '<timers>\n']
		for timer in self.timer_list + self.processed_timers:
			if timer.dontSave:
				continue
			list.append('<timer')
			list.append(' timertype="' + str(stringToXML({
				TIMERTYPE.NONE: "nothing",
				TIMERTYPE.WAKEUP: "wakeup",
				TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
				TIMERTYPE.AUTOSTANDBY: "autostandby",
				TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
				TIMERTYPE.STANDBY: "standby",
				TIMERTYPE.DEEPSTANDBY: "deepstandby",
				TIMERTYPE.REBOOT: "reboot",
				TIMERTYPE.RESTART: "restart"
			}[timer.timerType])) + '"')
			list.append(' begin="' + str(int(timer.begin)) + '"')
			list.append(' end="' + str(int(timer.end)) + '"')
			list.append(' repeated="' + str(int(timer.repeated)) + '"')
			list.append(' afterevent="' + str(stringToXML({
				AFTEREVENT.NONE: "nothing",
				AFTEREVENT.WAKEUP: "wakeup",
				AFTEREVENT.WAKEUPTOSTANDBY: "wakeuptostandby",
				AFTEREVENT.STANDBY: "standby",
				AFTEREVENT.DEEPSTANDBY: "deepstandby"
			}[timer.afterEvent])) + '"')
			list.append(' disabled="' + str(int(timer.disabled)) + '"')
			list.append(' autosleepinstandbyonly="' + str(timer.autosleepinstandbyonly) + '"')
			list.append(' autosleepdelay="' + str(timer.autosleepdelay) + '"')
			list.append(' autosleeprepeat="' + str(timer.autosleeprepeat) + '"')
			list.append(' autosleepwindow="' + str(timer.autosleepwindow) + '"')
			list.append(' autosleepbegin="' + str(int(timer.autosleepbegin)) + '"')
			list.append(' autosleepend="' + str(int(timer.autosleepend)) + '"')
			list.append(' nettraffic="' + str(timer.nettraffic) + '"')
			list.append(' trafficlimit="' + str(int(timer.trafficlimit)) + '"')
			list.append(' netip="' + str(timer.netip) + '"')
			list.append(' ipadress="' + str(timer.ipadress) + '"')
			list.append('>\n')
			for ltime, code, msg in timer.log_entries:
				if ltime > time() - savedays:
					list.append('<log')
					list.append(' code="' + str(code) + '"')
					list.append(' time="' + str(ltime) + '"')
					list.append('>')
					list.append(str(stringToXML(msg)))
					list.append('</log>\n')
			list.append('</timer>\n')
		list.append('</timers>\n')
		file = open(self.Filename + ".writing", "w")
		for x in list:
			file.write(x)
		file.flush()
		os.fsync(file.fileno())
		file.close()
		os.rename(self.Filename + ".writing", self.Filename)
	def isAutoDeepstandbyEnabled(self):
		"""Return False when an imminent auto-deepstandby timer is blocked.

		A timer due within 15 minutes counts as blocked while network
		traffic or a configured host keeps the box busy; timers with a
		sleep window count as enabled only when the window opens within
		15 minutes.
		"""
		ret = True
		now = time()
		for timer in self.timer_list:
			if timer.timerType == TIMERTYPE.AUTODEEPSTANDBY:
				if timer.begin <= now + 900:
					ret = not (timer.getNetworkTraffic() or timer.getNetworkAdress())
				elif timer.autosleepwindow == 'yes':
					ret = timer.autosleepbegin <= now + 900
			if not ret:
				break
		return ret
def isProcessing(self, exceptTimer = None, endedTimer = None):
isRunning = False
for timer in self.timer_list:
if timer.timerType != TIMERTYPE.AUTOSTANDBY and timer.timerType != TIMERTYPE.AUTODEEPSTANDBY and timer.timerType != exceptTimer and timer.timerType != endedTimer:
if timer.isRunning():
isRunning = True
break
return isRunning
def getNextZapTime(self):
now = time()
for timer in self.timer_list:
if timer.begin < now:
continue
return timer.begin
return -1
	def getNextPowerManagerTimeOld(self, getNextStbPowerOn = False):
		"""Collect upcoming non-auto power timer events, sorted by time.

		Returns a list of (time, timerType, afterEvent, state) tuples;
		timerType is set when the event is a begin, afterEvent when it is
		an end (within 30 s tolerance).  [(-1, None, None, None)] when no
		timer applies.
		"""
		now = int(time())
		nextPTlist = [(-1,None,None,None)]
		for timer in self.timer_list:
			if timer.timerType != TIMERTYPE.AUTOSTANDBY and timer.timerType != TIMERTYPE.AUTODEEPSTANDBY:
				next_act = timer.getNextWakeup(getNextStbPowerOn)
				if next_act + 3 < now:
					continue
				if getNextStbPowerOn and debug:
					print "[powertimer] next stb power up", strftime("%a, %Y/%m/%d %H:%M", localtime(next_act))
				next_timertype = next_afterevent = None
				if nextPTlist[0][0] == -1:
					if abs(next_act - timer.begin) <= 30:
						next_timertype = timer.timerType
					elif abs(next_act - timer.end) <= 30:
						next_afterevent = timer.afterEvent
					nextPTlist = [(next_act,next_timertype,next_afterevent,timer.state)]
				else:
					if abs(next_act - timer.begin) <= 30:
						next_timertype = timer.timerType
					elif abs(next_act - timer.end) <= 30:
						next_afterevent = timer.afterEvent
					nextPTlist.append((next_act,next_timertype,next_afterevent,timer.state))
		nextPTlist.sort()
		return nextPTlist
def getNextPowerManagerTime(self, getNextStbPowerOn = False, getNextTimerTyp = False):
#getNextStbPowerOn = True returns tuple -> (timer.begin, set standby)
#getNextTimerTyp = True returns next timer list -> [(timer.begin, timer.timerType, timer.afterEvent, timer.state)]
global DSsave, RSsave, RBsave, aeDSsave
nextrectime = self.getNextPowerManagerTimeOld(getNextStbPowerOn)
faketime = int(time()) + 300
if getNextStbPowerOn:
if config.timeshift.isRecording.value:
if 0 < nextrectime[0][0] < faketime:
return nextrectime[0][0], int(nextrectime[0][1] == 2 or nextrectime[0][2] == 2)
else:
return faketime, 0
else:
return nextrectime[0][0], int(nextrectime[0][1] == 2 or nextrectime[0][2] == 2)
elif getNextTimerTyp:
#check entrys and plausibility of shift state (manual canceled timer has shift/save state not reset)
tt = ae = []
now = time()
if debug: print "+++++++++++++++"
for entry in nextrectime:
if entry[0] < now + 900: tt.append(entry[1])
if entry[0] < now + 900: ae.append(entry[2])
if debug: print ctime(entry[0]), entry
if not TIMERTYPE.RESTART in tt: RSsave = False
if not TIMERTYPE.REBOOT in tt: RBsave = False
if not TIMERTYPE.DEEPSTANDBY in tt: DSsave = False
if not AFTEREVENT.DEEPSTANDBY in ae: aeDSsave = False
if debug: print "RSsave=%s, RBsave=%s, DSsave=%s, aeDSsave=%s, wasTimerWakeup=%s" %(RSsave, RBsave, DSsave, aeDSsave, wasTimerWakeup)
if debug: print "+++++++++++++++"
###
if config.timeshift.isRecording.value:
if 0 < nextrectime[0][0] < faketime:
return nextrectime
else:
nextrectime.append((faketime,None,None,None))
nextrectime.sort()
return nextrectime
else:
return nextrectime
else:
if config.timeshift.isRecording.value:
if 0 < nextrectime[0][0] < faketime:
return nextrectime[0][0]
else:
return faketime
else:
return nextrectime[0][0]
def isNextPowerManagerAfterEventActionAuto(self):
for timer in self.timer_list:
if timer.timerType == TIMERTYPE.WAKEUPTOSTANDBY or timer.afterEvent == AFTEREVENT.WAKEUPTOSTANDBY or timer.timerType == TIMERTYPE.WAKEUP or timer.afterEvent == AFTEREVENT.WAKEUP:
return True
return False
	def record(self, entry, ignoreTSC=False, dosave=True): # called by loadTimer with dosave=False
		"""Enqueue a power timer entry and (optionally) persist the timer list.

		Returns None unconditionally.
		"""
		# NOTE(review): ignoreTSC is accepted but never used in this body.
		entry.timeChanged()
		print "[PowerTimer]",str(entry)
		# Back-reference so the entry can reach its owning timer service.
		entry.Timer = self
		self.addTimerEntry(entry)
		if dosave:
			self.saveTimer()
		return None
	def removeEntry(self, entry):
		"""Abort a timer entry, drop it from the processed list and save.

		The order matters: repeat flag is cleared first so the entry is not
		re-enqueued, then the entry is aborted (end time set to now), then it
		is disabled and finally removed from processed_timers.
		"""
		print "[PowerTimer] Remove",str(entry)
		# avoid re-enqueuing
		entry.repeated = False
		# abort timer.
		# this sets the end time to current time, so timer will be stopped.
		entry.autoincrease = False
		entry.abort()
		if entry.state != entry.StateEnded:
			self.timeChanged(entry)
		# print "state: ", entry.state
		# print "in processed: ", entry in self.processed_timers
		# print "in running: ", entry in self.timer_list
		# disable timer first
		# NOTE(review): 3 is presumably StateEnded — confirm against the entry class.
		if entry.state != 3:
			entry.disable()
		# autoincrease instanttimer if possible
		if not entry.dontSave:
			for x in self.timer_list:
				if x.setAutoincreaseEnd():
					self.timeChanged(x)
		# now the timer should be in the processed_timers list. remove it from there.
		if entry in self.processed_timers:
			self.processed_timers.remove(entry)
		self.saveTimer()
	def shutdown(self):
		"""Persist the timer list before the timer service stops."""
		self.saveTimer()
|
popazerty/openhdf-enigma2
|
PowerTimer.py
|
Python
|
gpl-2.0
| 41,102
|
[
"ASE"
] |
c128a780f00f0178ac3305fc42d3f5db70b53bf564faf1b8d89f63c73d833e94
|
"""
Utility functions for transcripts.
++++++++++++++++++++++++++++++++++
"""
import os
import copy
import json
import requests
import logging
from pysrt import SubRipTime, SubRipItem, SubRipFile
from lxml import etree
from HTMLParser import HTMLParser
from xmodule.exceptions import NotFoundError
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from .bumper_utils import get_bumper_settings
log = logging.getLogger(__name__)
class TranscriptException(Exception):
    """Generic error while processing transcripts."""
    pass
class TranscriptsGenerationException(Exception):
    """Error while generating (converting/parsing) transcripts."""
    pass
class GetTranscriptsFromYouTubeException(Exception):
    """Error while fetching transcripts from the YouTube API."""
    pass
class TranscriptsRequestValidationException(Exception):
    """Error while validating a transcripts-related request."""
    pass
def generate_subs(speed, source_speed, source_subs):
    """
    Rescale subtitles from one playback speed to another.

    Args:
        speed: float, target speed for which subtitles are generated.
        source_speed: float, speed of `source_subs`.
        source_subs: dict with 'start', 'end' (millisecond lists) and 'text'.

    Returns:
        dict of rescaled subtitles; the original dict is returned unchanged
        when the speeds already match.
    """
    if speed == source_speed:
        return source_subs
    factor = 1.0 * speed / source_speed

    def _scaled(timestamps):
        # Each timestamp is scaled and rounded back to an int millisecond.
        return [int(round(stamp * factor)) for stamp in timestamps]

    return {
        'start': _scaled(source_subs['start']),
        'end': _scaled(source_subs['end']),
        'text': source_subs['text'],
    }
def save_to_store(content, name, mime_type, location):
    """
    Persist named `content` in the contentstore under `location`.

    Returns the location of the saved content.
    """
    asset_location = Transcript.asset_location(location, name)
    asset = StaticContent(asset_location, name, mime_type, content)
    contentstore().save(asset)
    return asset_location
def save_subs_to_store(subs, subs_id, item, language='en'):
    """
    Serialize `subs` to JSON and save them as a transcript asset.

    Args:
        subs: dict, sjson subtitles.
        subs_id: str, subtitles id.
        item: video module instance (provides `location`).
        language: two-char language code of the translation.

    Returns: location of the saved subtitles.
    """
    return save_to_store(
        json.dumps(subs, indent=2),
        subs_filename(subs_id, language),
        'application/json',
        item.location,
    )
def youtube_video_transcript_name(youtube_text_api):
    """
    Look up the transcript name for the configured language on YouTube.

    Queries the transcript listing endpoint
    (http://video.google.com/timedtext?type=list&v={VideoId}) and returns
    the `name` attribute of the matching track, or None if not found.
    """
    wanted_lang = youtube_text_api['params']['lang']
    listing_params = {'type': 'list', 'v': youtube_text_api['params']['v']}
    response = requests.get('http://' + youtube_text_api['url'], params=listing_params)
    if response.status_code != 200 or not response.text:
        return None
    tree = etree.fromstring(
        response.content, parser=etree.XMLParser(encoding='utf-8')
    )
    # Scan every advertised track for the wanted language code.
    for track in tree:
        if track.tag == 'track' and track.get('lang_code', '') == wanted_lang:
            return track.get('name')
    return None
def get_transcripts_from_youtube(youtube_id, settings, i18n, youtube_transcript_name=''):
    """
    Gets transcripts from youtube for youtube_id.
    Parses only utf-8 encoded transcripts.
    Other encodings are not supported at the moment.
    Returns (status, transcripts): bool, dict.
    """
    _ = i18n.ugettext
    utf8_parser = etree.XMLParser(encoding='utf-8')
    youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
    youtube_text_api['params']['v'] = youtube_id
    # if the transcript name is not empty on youtube server we have to pass
    # name param in url in order to get transcript
    # example http://video.google.com/timedtext?lang=en&v={VideoId}&name={transcript_name}
    # NOTE(review): the `youtube_transcript_name` parameter is immediately
    # overwritten here, so a caller-supplied value is silently ignored —
    # confirm whether the parameter should take precedence.
    youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
    if youtube_transcript_name:
        youtube_text_api['params']['name'] = youtube_transcript_name
    data = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
    if data.status_code != 200 or not data.text:
        msg = _("Can't receive transcripts from Youtube for {youtube_id}. Status code: {status_code}.").format(
            youtube_id=youtube_id,
            status_code=data.status_code
        )
        raise GetTranscriptsFromYouTubeException(msg)
    sub_starts, sub_ends, sub_texts = [], [], []
    xmltree = etree.fromstring(data.content, parser=utf8_parser)
    for element in xmltree:
        if element.tag == "text":
            start = float(element.get("start"))
            duration = float(element.get("dur", 0))  # dur is not mandatory
            text = element.text
            end = start + duration
            if text:
                # Start and end should be ints representing the millisecond timestamp.
                sub_starts.append(int(start * 1000))
                sub_ends.append(int((end + 0.0001) * 1000))
                sub_texts.append(text.replace('\n', ' '))
    return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
def download_youtube_subs(youtube_id, video_descriptor, settings):
    """
    Download transcripts from Youtube and save them to assets.

    Args:
        youtube_id: str, actual youtube_id of the video.
        video_descriptor: video descriptor instance.
        settings: Django settings object providing YOUTUBE configuration.

    We save transcripts for 1.0 speed, as for other speed conversion is done on front-end.

    Returns:
        None, if transcripts were successfully downloaded and saved.

    Raises:
        GetTranscriptsFromYouTubeException, if fails.
    """
    i18n = video_descriptor.runtime.service(video_descriptor, "i18n")
    # (removed the unused local "_ = i18n.ugettext"; the i18n service itself
    # is forwarded so the fetcher can localize its own error messages)
    subs = get_transcripts_from_youtube(youtube_id, settings, i18n)
    save_subs_to_store(subs, youtube_id, video_descriptor)
    log.info("Transcripts for youtube_id %s for 1.0 speed are downloaded and saved.", youtube_id)
def remove_subs_from_store(subs_id, item, lang='en'):
    """
    Delete the stored sjson transcript for `subs_id`/`lang`, if it exists.
    """
    Transcript.delete_asset(item.location, subs_filename(subs_id, lang))
def generate_subs_from_source(speed_subs, subs_type, subs_filedata, item, language='en'):
    """Generate transcripts from source files (like SubRip format, etc.)
    and save them to assets for `item` module.
    We expect, that speed of source subs equal to 1

    :param speed_subs: dictionary {speed: sub_id, ...}
    :param subs_type: type of source subs: "srt", ...
    :param subs_filedata:unicode, content of source subs.
    :param item: module object.
    :param language: str, language of translation of transcripts
    :returns: True, if all subs are generated and saved successfully.
    :raises TranscriptsGenerationException: on non-srt input or unparsable srt.
    """
    _ = item.runtime.service(item, "i18n").ugettext
    # Only SubRip sources are supported; anything else is rejected up front.
    if subs_type.lower() != 'srt':
        raise TranscriptsGenerationException(_("We support only SubRip (*.srt) transcripts format."))
    try:
        srt_subs_obj = SubRipFile.from_string(subs_filedata)
    except Exception as ex:
        msg = _("Something wrong with SubRip transcripts file during parsing. Inner message is {error_message}").format(
            error_message=ex.message
        )
        raise TranscriptsGenerationException(msg)
    # An empty parse result (no cues) is also treated as a broken file.
    if not srt_subs_obj:
        raise TranscriptsGenerationException(_("Something wrong with SubRip transcripts file during parsing."))
    sub_starts = []
    sub_ends = []
    sub_texts = []
    # Flatten the SubRip cues into parallel millisecond/text lists (sjson form).
    for sub in srt_subs_obj:
        sub_starts.append(sub.start.ordinal)
        sub_ends.append(sub.end.ordinal)
        sub_texts.append(sub.text.replace('\n', ' '))
    subs = {
        'start': sub_starts,
        'end': sub_ends,
        'text': sub_texts}
    # Save a rescaled copy of the subs for every requested playback speed.
    for speed, subs_id in speed_subs.iteritems():
        save_subs_to_store(
            generate_subs(speed, 1, subs),
            subs_id,
            item,
            language
        )
    return subs
def generate_srt_from_sjson(sjson_subs, speed):
    """Generate transcripts with speed = 1.0 from sjson to SubRip (*.srt).

    :param sjson_subs: "sjson" subs.
    :param speed: speed of `sjson_subs`.
    :returns: "srt" subs, or an empty string when the sjson lists disagree
        in length.
    """
    starts = sjson_subs['start']
    ends = sjson_subs['end']
    texts = sjson_subs['text']
    if not (len(starts) == len(ends) == len(texts)):
        return ''
    # Normalize to 1.0 speed before rendering the SubRip cues.
    normalized = generate_subs(speed, 1, sjson_subs)
    pieces = []
    for idx in range(len(normalized['start'])):
        cue = SubRipItem(
            index=idx,
            start=SubRipTime(milliseconds=normalized['start'][idx]),
            end=SubRipTime(milliseconds=normalized['end'][idx]),
            text=normalized['text'][idx]
        )
        pieces.append(unicode(cue))
        pieces.append('\n')
    return ''.join(pieces)
def copy_or_rename_transcript(new_name, old_name, item, delete_old=False, user=None):
    """
    Renames `old_name` transcript file in storage to `new_name`.

    If `old_name` is not found in storage, raises `NotFoundError`.
    If `delete_old` is True, removes `old_name` files from storage.
    """
    old_filename = 'subs_{0}.srt.sjson'.format(old_name)
    old_location = StaticContent.compute_location(item.location.course_key, old_filename)
    existing_data = contentstore().find(old_location).data
    # Re-save the same sjson payload under the new id, then point item.sub at it.
    save_subs_to_store(json.loads(existing_data), new_name, item)
    item.sub = new_name
    item.save_with_metadata(user)
    if delete_old:
        remove_subs_from_store(old_name, item)
def get_html5_ids(html5_sources):
    """
    Strip directory and extension from each HTML5 source URL/path.

    NOTE: This assumes that '/' are not in the filename.
    """
    ids = []
    for source in html5_sources:
        basename = source.split('/')[-1]
        ids.append(basename.rsplit('.', 1)[0])
    return ids
def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):
    """
    Does some specific things, that can be done only on save.
    Video player item has some video fields: HTML5 ones and Youtube one.
    If value of `sub` field of `new_item` is cleared, transcripts should be removed.
    `item` is video module instance with updated values of fields,
    but actually have not been saved to store yet.
    `old_metadata` contains old values of XFields.
    # 1.
    If value of `sub` field of `new_item` is different from values of video fields of `new_item`,
    and `new_item.sub` file is present, then code in this function creates copies of
    `new_item.sub` file with new names. That names are equal to values of video fields of `new_item`
    After that `sub` field of `new_item` is changed to one of values of video fields.
    This whole action ensures that after user changes video fields, proper `sub` files, corresponding
    to new values of video fields, will be presented in system.
    # 2 convert /static/filename.srt to filename.srt in self.transcripts.
    (it is done to allow user to enter both /static/filename.srt and filename.srt)
    # 3. Generate transcripts translation only when user clicks `save` button, not while switching tabs.
    a) delete sjson translation for those languages, which were removed from `item.transcripts`.
        Note: we are not deleting old SRT files to give user more flexibility.
    b) For all SRT files in`item.transcripts` regenerate new SJSON files.
        (To avoid confusing situation if you attempt to correct a translation by uploading
        a new version of the SRT file with same name).
    """
    _ = item.runtime.service(item, "i18n").ugettext
    # 1.
    html5_ids = get_html5_ids(item.html5_sources)
    possible_video_id_list = [item.youtube_id_1_0] + html5_ids
    sub_name = item.sub
    for video_id in possible_video_id_list:
        if not video_id:
            continue
        if not sub_name:
            # `sub` was cleared: delete the sjson stored under this video id.
            remove_subs_from_store(video_id, item)
            continue
        # copy_or_rename_transcript changes item.sub of module
        try:
            # updates item.sub with `video_id`, if it is successful.
            copy_or_rename_transcript(video_id, sub_name, item, user=user)
        except NotFoundError:
            # subtitles file `sub_name` is not presented in the system. Nothing to copy or rename.
            log.debug(
                "Copying %s file content to %s name is failed, "
                "original file does not exist.",
                sub_name, video_id
            )
    # 2.
    if generate_translation:
        # Normalize "/static/filename.srt" style entries down to "filename.srt".
        for lang, filename in item.transcripts.items():
            item.transcripts[lang] = os.path.split(filename)[-1]
    # 3.
    if generate_translation:
        old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()
        new_langs = set(item.transcripts)
        for lang in old_langs.difference(new_langs):  # 3a
            for video_id in possible_video_id_list:
                if video_id:
                    remove_subs_from_store(video_id, item, lang)
        reraised_message = ''
        for lang in new_langs:  # 3b
            try:
                generate_sjson_for_all_speeds(
                    item,
                    item.transcripts[lang],
                    {speed: subs_id for subs_id, speed in youtube_speed_dict(item).iteritems()},
                    lang,
                )
            except TranscriptException as ex:
                # remove key from transcripts because proper srt file does not exist in assets.
                item.transcripts.pop(lang)
                reraised_message += ' ' + ex.message
        # Collect per-language failures and raise them once, after saving.
        if reraised_message:
            item.save_with_metadata(user)
            raise TranscriptException(reraised_message)
def youtube_speed_dict(item):
    """
    Map each configured youtube id of `item` to its playback speed.

    Empty/unset ids are skipped.
    """
    speed_by_id = {}
    id_speed_pairs = (
        (item.youtube_id_0_75, 0.75),
        (item.youtube_id_1_0, 1.00),
        (item.youtube_id_1_25, 1.25),
        (item.youtube_id_1_5, 1.50),
    )
    for youtube_id, speed in id_speed_pairs:
        if youtube_id:
            speed_by_id[youtube_id] = speed
    return speed_by_id
def subs_filename(subs_id, lang='en'):
    """
    Generate proper filename for storage.

    English transcripts have no language prefix; other languages are
    prefixed with "<lang>_".
    """
    prefix = u'' if lang == 'en' else u'{0}_'.format(lang)
    return u'{0}subs_{1}.srt.sjson'.format(prefix, subs_id)
def generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, lang):
    """
    Generates sjson from srt for given lang.

    `item` is module object.
    `user_filename` is the uploaded srt asset name; `result_subs_dict` maps
    speed -> sub_id for every sjson file to produce.

    Raises TranscriptException when the srt asset cannot be found.
    """
    _ = item.runtime.service(item, "i18n").ugettext
    try:
        srt_transcripts = contentstore().find(Transcript.asset_location(item.location, user_filename))
    except NotFoundError as ex:
        raise TranscriptException(_("{exception_message}: Can't find uploaded transcripts: {user_filename}").format(
            exception_message=ex.message,
            user_filename=user_filename
        ))
    if not lang:
        lang = item.transcript_language
    # Used utf-8-sig encoding type instead of utf-8 to remove BOM(Byte Order Mark), e.g. U+FEFF
    generate_subs_from_source(
        result_subs_dict,
        os.path.splitext(user_filename)[1][1:],
        srt_transcripts.data.decode('utf-8-sig'),
        item,
        lang
    )
def get_or_create_sjson(item, transcripts):
    """
    Get sjson if already exists, otherwise generate it.

    Generate sjson with subs_id name, from user uploaded srt.
    Subs_id is extracted from srt filename, which was set by user.

    Args:
        item: module object.
        transcripts (dict): dictionary of (language: file) pairs.

    Raises:
        TranscriptException: when srt subtitles do not exist,
        and exceptions from generate_subs_from_source.
    """
    user_filename = transcripts[item.transcript_language]
    # The sub id is the srt filename without its extension.
    user_subs_id = os.path.splitext(user_filename)[0]
    source_subs_id, result_subs_dict = user_subs_id, {1.0: user_subs_id}
    try:
        sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
    except NotFoundError:  # generating sjson from srt
        generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, item.transcript_language)
        # Re-read the freshly generated asset.
        sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
    return sjson_transcript
class Transcript(object):
    """
    Container for transcript methods.

    Groups the static helpers for converting transcript formats and for
    reading/writing transcript assets in the contentstore.
    """
    # MIME type served for each supported transcript format.
    mime_types = {
        'srt': 'application/x-subrip; charset=utf-8',
        'txt': 'text/plain; charset=utf-8',
        'sjson': 'application/json',
    }
    @staticmethod
    def convert(content, input_format, output_format):
        """
        Convert transcript `content` from `input_format` to `output_format`.

        Accepted input formats: sjson, srt.
        Accepted output format: srt, txt.

        Note: srt -> sjson is not implemented and raises NotImplementedError.
        """
        assert input_format in ('srt', 'sjson')
        assert output_format in ('txt', 'srt', 'sjson')
        # Identity conversion: nothing to do.
        if input_format == output_format:
            return content
        if input_format == 'srt':
            if output_format == 'txt':
                text = SubRipFile.from_string(content.decode('utf8')).text
                return HTMLParser().unescape(text)
            elif output_format == 'sjson':
                raise NotImplementedError
        if input_format == 'sjson':
            if output_format == 'txt':
                text = json.loads(content)['text']
                return HTMLParser().unescape("\n".join(text))
            elif output_format == 'srt':
                return generate_srt_from_sjson(json.loads(content), speed=1.0)
    @staticmethod
    def asset(location, subs_id, lang='en', filename=None):
        """
        Get asset from contentstore, asset location is built from subs_id and lang.

        `location` is module location. An explicit `filename` overrides the
        name derived from subs_id/lang.
        """
        asset_filename = subs_filename(subs_id, lang) if not filename else filename
        return Transcript.get_asset(location, asset_filename)
    @staticmethod
    def get_asset(location, filename):
        """
        Return asset by location and filename.
        """
        return contentstore().find(Transcript.asset_location(location, filename))
    @staticmethod
    def asset_location(location, filename):
        """
        Return asset location. `location` is module location.
        """
        return StaticContent.compute_location(location.course_key, filename)
    @staticmethod
    def delete_asset(location, filename):
        """
        Delete asset by location and filename.

        Missing assets are ignored. Returns the (computed) asset location.
        """
        try:
            contentstore().delete(Transcript.asset_location(location, filename))
            log.info("Transcript asset %s was removed from store.", filename)
        except NotFoundError:
            pass
        return StaticContent.compute_location(location.course_key, filename)
class VideoTranscriptsMixin(object):
    """Mixin class for transcript functionality.

    This is necessary for both VideoModule and VideoDescriptor.
    """
    def available_translations(self, transcripts, verify_assets=True):
        """Return a list of language codes for which we have transcripts.

        Args:
            verify_assets (boolean): If True, checks to ensure that the transcripts
                really exist in the contentstore. If False, we just look at the
                VideoDescriptor fields and do not query the contentstore. One reason
                we might do this is to avoid slamming contentstore() with queries
                when trying to make a listing of videos and their languages.
                Defaults to True.
            transcripts (dict): A dict with all transcripts and a sub.
                Defaults to False
        """
        translations = []
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        # If we're not verifying the assets, we just trust our field values
        if not verify_assets:
            translations = list(other_lang)
            if not translations or sub:
                translations += ['en']
            # NOTE: returns a set here, while the verified path returns a list.
            return set(translations)
        # If we've gotten this far, we're going to verify that the transcripts
        # being referenced are actually in the contentstore.
        if sub:  # check if sjson exists for 'en'.
            try:
                Transcript.asset(self.location, sub, 'en')
            except NotFoundError:
                try:
                    # Fall back to treating `sub` as a raw filename.
                    Transcript.asset(self.location, None, None, sub)
                except NotFoundError:
                    pass
                else:
                    translations = ['en']
            else:
                translations = ['en']
        for lang in other_lang:
            try:
                Transcript.asset(self.location, None, None, other_lang[lang])
            except NotFoundError:
                continue
            translations.append(lang)
        return translations
    def get_transcript(self, transcripts, transcript_format='srt', lang=None):
        """
        Returns transcript, filename and MIME type.

        transcripts (dict): A dict with all transcripts and a sub.

        Raises:
            - NotFoundError if cannot find transcript file in storage.
            - ValueError if transcript file is empty or incorrect JSON.
            - KeyError if transcript file has incorrect format.

        If language is 'en', self.sub should be correct subtitles name.
        If language is 'en', but if self.sub is not defined, this means that we
        should search for video name in order to get proper transcript (old style courses).
        If language is not 'en', give back transcript in proper language and format.
        """
        if not lang:
            lang = self.get_default_transcript_language(transcripts)
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        if lang == 'en':
            if sub:  # HTML5 case and (Youtube case for new style videos)
                transcript_name = sub
            elif self.youtube_id_1_0:  # old courses
                transcript_name = self.youtube_id_1_0
            else:
                log.debug("No subtitles for 'en' language")
                raise ValueError
            # English subs are stored as sjson; convert on the way out.
            data = Transcript.asset(self.location, transcript_name, lang).data
            filename = u'{}.{}'.format(transcript_name, transcript_format)
            content = Transcript.convert(data, 'sjson', transcript_format)
        else:
            # Non-English subs are stored as srt under their uploaded filename.
            data = Transcript.asset(self.location, None, None, other_lang[lang]).data
            filename = u'{}.{}'.format(os.path.splitext(other_lang[lang])[0], transcript_format)
            content = Transcript.convert(data, 'srt', transcript_format)
        if not content:
            log.debug('no subtitles produced in get_transcript')
            raise ValueError
        return content, filename, Transcript.mime_types[transcript_format]
    def get_default_transcript_language(self, transcripts):
        """
        Returns the default transcript language for this video module.

        Args:
            transcripts (dict): A dict with all transcripts and a sub.
        """
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        if self.transcript_language in other_lang:
            transcript_language = self.transcript_language
        elif sub:
            transcript_language = u'en'
        elif len(other_lang) > 0:
            # Deterministic fallback: first language in sorted order.
            transcript_language = sorted(other_lang)[0]
        else:
            transcript_language = u'en'
        return transcript_language
    def get_transcripts_info(self, is_bumper=False):
        """
        Returns a transcript dictionary for the video.

        For bumper videos the transcripts come from the bumper settings; the
        'en' entry is popped out as the `sub`.
        """
        if is_bumper:
            transcripts = copy.deepcopy(get_bumper_settings(self).get('transcripts', {}))
            return {
                "sub": transcripts.pop("en", ""),
                "transcripts": transcripts,
            }
        else:
            return {
                "sub": self.sub,
                "transcripts": self.transcripts,
            }
|
ahmadiga/min_edx
|
common/lib/xmodule/xmodule/video_module/transcripts_utils.py
|
Python
|
agpl-3.0
| 24,420
|
[
"FEFF"
] |
3c868b7d5a45a73dd534182b5c104a3fefb59caa2317e899f1561dc366463256
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from joblib import Parallel, effective_n_jobs
from .. import config_context
from ..utils.validation import _num_samples
from ..utils.validation import check_non_negative
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches, get_chunk_n_rows
from ..utils import is_scalar_nan
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..utils._mask import _get_mask
from ..utils.fixes import delayed
from ..utils.fixes import sp_version, parse_version
from ._pairwise_distances_reduction import PairwiseDistancesArgKmin
from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
from ..exceptions import DataConversionWarning
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
def check_pairwise_arrays(
    X,
    Y,
    *,
    precomputed=False,
    dtype=None,
    accept_sparse="csr",
    force_all_finite=True,
    copy=False,
):
    """Set X and Y appropriately and checks inputs.

    If Y is None, it is set as a pointer to X (i.e. not a copy). If Y is
    given, this does not happen. All distance metrics should use this
    function first to assert that the given parameters are correct and safe
    to use: both X and Y are validated as at-least-2D float arrays (or
    `dtype` if provided), and their second dimensions are checked for
    compatibility (or the precomputed-distance shape contract).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)

    precomputed : bool, default=False
        True if X is to be treated as precomputed distances to the samples
        in Y.

    dtype : str, type, list of type, default=None
        Data type required for X and Y. If None, an appropriate float type
        is selected by _return_float_dtype.

        .. versionadded:: 0.18

    accept_sparse : str, bool or list/tuple of str, default='csr'
        Allowed sparse matrix formats ('csc', 'csr', ...). Sparse input in a
        disallowed format is converted to the first listed format. True
        allows any format; False rejects sparse input.

    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise on np.inf, np.nan, pd.NA: True forbids all;
        False allows all; 'allow-nan' allows only np.nan/pd.NA.

        .. versionadded:: 0.22
           ``force_all_finite`` accepts the string ``'allow-nan'``.

        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`.

    copy : bool, default=False
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

        .. versionadded:: 0.22

    Returns
    -------
    safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy
        array. If Y was None, safe_Y will be a pointer to X.
    """
    X, Y, dtype_float = _return_float_dtype(X, Y)
    estimator = "check_pairwise_arrays"
    if dtype is None:
        dtype = dtype_float
    # Both inputs are validated with identical options.
    check_kwargs = dict(
        accept_sparse=accept_sparse,
        dtype=dtype,
        copy=copy,
        force_all_finite=force_all_finite,
        estimator=estimator,
    )
    if Y is X or Y is None:
        # Validate once and keep Y aliased to X.
        X = Y = check_array(X, **check_kwargs)
    else:
        X = check_array(X, **check_kwargs)
        Y = check_array(Y, **check_kwargs)
    if precomputed:
        if X.shape[1] != Y.shape[0]:
            raise ValueError(
                "Precomputed metric requires shape "
                "(n_queries, n_indexed). Got (%d, %d) "
                "for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])
            )
    elif X.shape[1] != Y.shape[1]:
        raise ValueError(
            "Incompatible dimension for X and Y matrices: "
            "X.shape[1] == %d while Y.shape[1] == %d" % (X.shape[1], Y.shape[1])
        )
    return X, Y
def check_paired_arrays(X, Y):
    """Set X and Y appropriately and checks inputs for paired distances.

    All paired distance metrics should use this function first to assert
    that the given parameters are correct and safe to use: both X and Y are
    validated as at-least-2D float arrays, and their shapes must match
    exactly.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy
        array. If Y was None, safe_Y will be a pointer to X.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape == Y.shape:
        return X, Y
    raise ValueError(
        "X and Y should be of same shape. They were respectively %r and %r long."
        % (X.shape, Y.shape)
    )
# Pairwise distances
def euclidean_distances(
    X, Y=None, *, Y_norm_squared=None, squared=False, X_norm_squared=None
):
    """
    Compute the distance matrix between each pair from a vector array X and Y.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation is fast for sparse data and lets `dot(x, x)` /
    `dot(y, y)` be pre-computed, but it is not the most precise way of doing
    the computation (it can suffer from "catastrophic cancellation"), and the
    returned matrix may not be exactly symmetric.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array where each row is a sample and each column is a feature.

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        An array where each row is a sample and each column is a feature.
        If `None`, method uses `Y=X`.

    Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \
            or (1, n_samples_Y), default=None
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``). May be ignored in some cases (float32).

    squared : bool, default=False
        Return squared Euclidean distances.

    X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \
            or (1, n_samples_X), default=None
        Pre-computed dot-products of vectors in X (e.g.,
        ``(X**2).sum(axis=1)``). May be ignored in some cases (float32).

    Returns
    -------
    distances : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the distances between the row vectors of `X`
        and the row vectors of `Y`.

    See Also
    --------
    paired_distances : Distances betweens pairs of elements of X and Y.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> euclidean_distances(X, X)
    array([[0., 1.],
           [1., 0.]])
    >>> euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])
    """
    X, Y = check_pairwise_arrays(X, Y)

    def _validated_norms(norms, data, name, row):
        # Normalize a user-supplied squared-norm vector to the expected
        # orientation: a column (n, 1) for X, a row (1, n) for Y.
        norms = check_array(norms, ensure_2d=False)
        original_shape = norms.shape
        n = data.shape[0]
        if norms.shape == (n,):
            norms = norms.reshape(1, -1) if row else norms.reshape(-1, 1)
        if norms.shape == ((n, 1) if row else (1, n)):
            norms = norms.T
        if norms.shape != ((1, n) if row else (n, 1)):
            raise ValueError(
                f"Incompatible dimensions for {name} of shape {data.shape} and "
                f"{name}_norm_squared of shape {original_shape}."
            )
        return norms

    if X_norm_squared is not None:
        X_norm_squared = _validated_norms(X_norm_squared, X, "X", row=False)
    if Y_norm_squared is not None:
        Y_norm_squared = _validated_norms(Y_norm_squared, Y, "Y", row=True)
    return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared)
def _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None, squared=False):
    """Computational part of euclidean_distances.

    Assumes inputs are already checked.

    If norms are passed as float32, they are unused (recomputing them on
    float64 chunks is more accurate). If arrays are passed as float32, norms
    need to be recomputed on upcast chunks.

    TODO: use a float64 accumulator in row_norms to avoid the latter.
    """
    # XX holds the squared row norms of X as a column vector, or None when
    # they must be (re)computed on float64-upcast chunks for accuracy.
    if X_norm_squared is not None:
        if X_norm_squared.dtype == np.float32:
            XX = None
        else:
            XX = X_norm_squared.reshape(-1, 1)
    elif X.dtype == np.float32:
        XX = None
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    if Y is X:
        # Same array: reuse X's norms (transposed to a row vector).
        YY = None if XX is None else XX.T
    else:
        # YY: squared row norms of Y as a row vector, same policy as XX.
        if Y_norm_squared is not None:
            if Y_norm_squared.dtype == np.float32:
                YY = None
            else:
                YY = Y_norm_squared.reshape(1, -1)
        elif Y.dtype == np.float32:
            YY = None
        else:
            YY = row_norms(Y, squared=True)[np.newaxis, :]
    if X.dtype == np.float32:
        # To minimize precision issues with float32, we compute the distance
        # matrix on chunks of X and Y upcast to float64
        distances = _euclidean_distances_upcast(X, XX, Y, YY)
    else:
        # if dtype is already float64, no need to chunk and upcast
        # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, computed in place.
        distances = -2 * safe_sparse_dot(X, Y.T, dense_output=True)
        distances += XX
        distances += YY
    # Rounding can produce small negative values; clamp before sqrt.
    np.maximum(distances, 0, out=distances)
    # Ensure that distances between vectors and themselves are set to 0.0.
    # This may not be the case due to floating point rounding errors.
    if X is Y:
        np.fill_diagonal(distances, 0)
    return distances if squared else np.sqrt(distances, out=distances)
def nan_euclidean_distances(
    X, Y=None, *, squared=False, missing_values=np.nan, copy=True
):
    """Calculate the euclidean distances in the presence of missing values.

    Compute the euclidean distance between each pair of samples in X and Y,
    where Y=X is assumed if Y=None. When calculating the distance between a
    pair of samples, this formulation ignores feature coordinates with a
    missing value in either sample and scales up the weight of the remaining
    coordinates:

        dist(x,y) = sqrt(weight * sq. distance from present coordinates)

    where,

        weight = Total # of coordinates / # of present coordinates

    For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
    is:

    .. math::
        \\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}

    If all the coordinates are missing or if there are no common present
    coordinates then NaN is returned for that pair.

    Read more in the :ref:`User Guide <metrics>`.

    .. versionadded:: 0.22

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        An array where each row is a sample and each column is a feature.
    Y : array-like of shape (n_samples_Y, n_features), default=None
        An array where each row is a sample and each column is a feature.
        If `None`, method uses `Y=X`.
    squared : bool, default=False
        Return squared Euclidean distances.
    missing_values : np.nan or int, default=np.nan
        Representation of missing value.
    copy : bool, default=True
        Make and use a deep copy of X and Y (if Y exists).

    Returns
    -------
    distances : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the distances between the row vectors of `X`
        and the row vectors of `Y`.

    See Also
    --------
    paired_distances : Distances between pairs of elements of X and Y.

    References
    ----------
    * John K. Dixon, "Pattern Recognition with Partly Missing Data",
      IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
      10, pp. 617 - 621, Oct. 1979.
      http://ieeexplore.ieee.org/abstract/document/4310090/

    Examples
    --------
    >>> from sklearn.metrics.pairwise import nan_euclidean_distances
    >>> nan = float("NaN")
    >>> X = [[0, 1], [1, nan]]
    >>> nan_euclidean_distances(X, X) # distance between rows of X
    array([[0.        , 1.41421356],
           [1.41421356, 0.        ]])
    >>> # get distance to origin
    >>> nan_euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])
    """
    # Only allow NaNs through validation when they are the missing marker.
    force_all_finite = "allow-nan" if is_scalar_nan(missing_values) else True
    X, Y = check_pairwise_arrays(
        X, Y, accept_sparse=False, force_all_finite=force_all_finite, copy=copy
    )
    # Get missing mask for X
    missing_X = _get_mask(X, missing_values)
    # Get missing mask for Y
    missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
    # set missing values to zero (mutates X/Y; `copy=True` protects callers)
    X[missing_X] = 0
    Y[missing_Y] = 0
    distances = euclidean_distances(X, Y, squared=True)
    # Adjust distances for missing values: subtract the squared contributions
    # that the zero-filled missing coordinates incorrectly added.
    XX = X * X
    YY = Y * Y
    distances -= np.dot(XX, missing_Y.T)
    distances -= np.dot(missing_X, YY.T)
    np.clip(distances, 0, None, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        np.fill_diagonal(distances, 0.0)
    # Count coordinates present in *both* samples of each pair.
    present_X = 1 - missing_X
    present_Y = present_X if Y is X else ~missing_Y
    present_count = np.dot(present_X, present_Y.T)
    # Pairs with no common present coordinate are undefined -> NaN.
    distances[present_count == 0] = np.nan
    # avoid divide by zero
    np.maximum(1, present_count, out=present_count)
    # Rescale: weight = n_features / n_present_coordinates.
    distances /= present_count
    distances *= X.shape[1]
    if not squared:
        np.sqrt(distances, out=distances)
    return distances
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
    """Euclidean distances between X and Y.

    Assumes X and Y have float32 dtype.
    Assumes XX and YY have float64 dtype or are None.

    X and Y are upcast to float64 by chunks, which size is chosen to limit
    memory increase by approximately 10% (at least 10MiB).
    """
    n_samples_X = X.shape[0]
    n_samples_Y = Y.shape[0]
    n_features = X.shape[1]
    # Result is accumulated in float32; only intermediates are float64.
    distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)
    if batch_size is None:
        # Densities scale the memory estimate for sparse inputs.
        x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
        y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
        # Allow 10% more memory than X, Y and the distance matrix take (at
        # least 10MiB)
        maxmem = max(
            (
                (x_density * n_samples_X + y_density * n_samples_Y) * n_features
                + (x_density * n_samples_X * y_density * n_samples_Y)
            )
            / 10,
            10 * 2**17,
        )
        # The increase amount of memory in 8-byte blocks is:
        # - x_density * batch_size * n_features (copy of chunk of X)
        # - y_density * batch_size * n_features (copy of chunk of Y)
        # - batch_size * batch_size (chunk of distance matrix)
        # Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
        #                                 xd=x_density and yd=y_density
        # Solve the quadratic for batch_size (positive root).
        tmp = (x_density + y_density) * n_features
        batch_size = (-tmp + np.sqrt(tmp**2 + 4 * maxmem)) / 2
        batch_size = max(int(batch_size), 1)
    x_batches = gen_batches(n_samples_X, batch_size)
    for i, x_slice in enumerate(x_batches):
        # Upcast the X chunk; recompute its norms in float64 if not provided.
        X_chunk = X[x_slice].astype(np.float64)
        if XX is None:
            XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]
        else:
            XX_chunk = XX[x_slice]
        y_batches = gen_batches(n_samples_Y, batch_size)
        for j, y_slice in enumerate(y_batches):
            if X is Y and j < i:
                # when X is Y the distance matrix is symmetric so we only need
                # to compute half of it.
                d = distances[y_slice, x_slice].T
            else:
                Y_chunk = Y[y_slice].astype(np.float64)
                if YY is None:
                    YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]
                else:
                    YY_chunk = YY[:, y_slice]
                # ||x||^2 - 2 x.y + ||y||^2 for this chunk pair.
                d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
                d += XX_chunk
                d += YY_chunk
            distances[x_slice, y_slice] = d.astype(np.float32, copy=False)
    return distances
def _argmin_min_reduce(dist, start):
# `start` is specified in the signature but not used. This is because the higher
# order `pairwise_distances_chunked` function needs reduction functions that are
# passed as argument to have a two arguments signature.
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
def _argmin_reduce(dist, start):
# `start` is specified in the signature but not used. This is because the higher
# order `pairwise_distances_chunked` function needs reduction functions that are
# passed as argument to have a two arguments signature.
return dist.argmin(axis=1)
def pairwise_distances_argmin_min(
    X, Y, *, axis=1, metric="euclidean", metric_kwargs=None
):
    """Compute minimum distances between one point and a set of points.

    For each row of X, find the index of the closest row of Y under the
    given metric, together with that minimal distance.

    This is mostly equivalent to calling::

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        Array containing points.
    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        Array containing points.
    axis : int, default=1
        Axis along which the argmin and distances are to be computed.
    metric : str or callable, default='euclidean'
        Metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used. A callable is invoked on each
        pair of rows and must return one distance value; this is less
        efficient than passing a metric name. Distance matrices are not
        supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']
    metric_kwargs : dict, default=None
        Keyword arguments to pass to specified metric function.

    Returns
    -------
    argmin : ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    distances : ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See Also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    X, Y = check_pairwise_arrays(X, Y)
    if axis == 0:
        X, Y = Y, X
    metric_kwargs = {} if metric_kwargs is None else metric_kwargs
    if PairwiseDistancesArgKmin.is_usable_for(X, Y, metric):
        # Adaptor: an explicit squared-euclidean request maps directly onto
        # this backend's native "sqeuclidean" metric.
        if metric == "euclidean" and metric_kwargs.get("squared", False):
            metric = "sqeuclidean"
            metric_kwargs = {}
        values, indices = PairwiseDistancesArgKmin.compute(
            X=X,
            Y=Y,
            k=1,
            metric=metric,
            metric_kwargs=metric_kwargs,
            strategy="auto",
            return_distance=True,
        )
        values = values.flatten()
        indices = indices.flatten()
    else:
        # TODO: once PairwiseDistancesArgKmin supports sparse input matrices and 32 bit,
        # we won't need to fallback to pairwise_distances_chunked anymore.
        # Finiteness checking is costly and the arrays were already validated
        # above, so disable it for the chunked computation.
        with config_context(assume_finite=True):
            chunk_results = pairwise_distances_chunked(
                X, Y, reduce_func=_argmin_min_reduce, metric=metric, **metric_kwargs
            )
            indices, values = zip(*chunk_results)
        indices = np.concatenate(indices)
        values = np.concatenate(values)
    return indices, values
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean", metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance).

    This is mostly equivalent to calling::

        pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)

    but uses much less memory, and is faster for large arrays.

    This function works with dense 2D arrays only.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        Array containing points.
    Y : array-like of shape (n_samples_Y, n_features)
        Arrays containing points.
    axis : int, default=1
        Axis along which the argmin and distances are to be computed.
    metric : str or callable, default="euclidean"
        Metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used. A callable is invoked on each
        pair of rows and must return one distance value; this is less
        efficient than passing a metric name. Distance matrices are not
        supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']
    metric_kwargs : dict, default=None
        Keyword arguments to pass to specified metric function.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See Also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    X, Y = check_pairwise_arrays(X, Y)
    if axis == 0:
        X, Y = Y, X
    # Default metric_kwargs exactly once (the previous version performed this
    # identical None-check twice, before and after check_pairwise_arrays).
    if metric_kwargs is None:
        metric_kwargs = {}
    if PairwiseDistancesArgKmin.is_usable_for(X, Y, metric):
        # This is an adaptor for one "sqeuclidean" specification.
        # For this backend, we can directly use "sqeuclidean".
        if metric_kwargs.get("squared", False) and metric == "euclidean":
            metric = "sqeuclidean"
            metric_kwargs = {}
        indices = PairwiseDistancesArgKmin.compute(
            X=X,
            Y=Y,
            k=1,
            metric=metric,
            metric_kwargs=metric_kwargs,
            strategy="auto",
            return_distance=False,
        )
        indices = indices.flatten()
    else:
        # TODO: once PairwiseDistancesArgKmin supports sparse input matrices and 32 bit,
        # we won't need to fallback to pairwise_distances_chunked anymore.
        # Turn off check for finiteness because this is costly and because arrays
        # have already been validated.
        with config_context(assume_finite=True):
            indices = np.concatenate(
                list(
                    # This returns a np.ndarray generator whose arrays we need
                    # to flatten into one.
                    pairwise_distances_chunked(
                        X, Y, reduce_func=_argmin_reduce, metric=metric, **metric_kwargs
                    )
                )
            )
    return indices
def haversine_distances(X, Y=None):
    """Compute the Haversine distance between samples in X and Y.

    The Haversine (or great circle) distance is the angular distance between
    two points on the surface of a sphere. The first coordinate of each point
    is assumed to be the latitude, the second is the longitude, given
    in radians. The dimension of the data must be 2.

    .. math::
        D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
                                + \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]

    Parameters
    ----------
    X : array-like of shape (n_samples_X, 2)
    Y : array-like of shape (n_samples_Y, 2), default=None

    Returns
    -------
    distance : ndarray of shape (n_samples_X, n_samples_Y)

    Notes
    -----
    As the Earth is nearly spherical, the haversine formula provides a good
    approximation of the distance between two points of the Earth surface, with
    a less than 1% error on average.

    Examples
    --------
    We want to calculate the distance between the Ezeiza Airport
    (Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
    France).

    >>> from sklearn.metrics.pairwise import haversine_distances
    >>> from math import radians
    >>> bsas = [-34.83333, -58.5166646]
    >>> paris = [49.0083899664, 2.53844117956]
    >>> bsas_in_radians = [radians(_) for _ in bsas]
    >>> paris_in_radians = [radians(_) for _ in paris]
    >>> result = haversine_distances([bsas_in_radians, paris_in_radians])
    >>> result * 6371000/1000  # multiply by Earth radius to get kilometers
    array([[    0.        , 11099.54035582],
           [11099.54035582,     0.        ]])
    """
    # Imported here to avoid a circular import at module load time.
    from ..metrics import DistanceMetric

    haversine = DistanceMetric.get_metric("haversine")
    return haversine.pairwise(X, Y)
def manhattan_distances(X, Y=None, *, sum_over_features=True):
    """Compute the L1 distances between the vectors in X and Y.

    With sum_over_features equal to False it returns the componentwise
    distances.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        An array where each row is a sample and each column is a feature.
    Y : array-like of shape (n_samples_Y, n_features), default=None
        An array where each row is a sample and each column is a feature.
        If `None`, method uses `Y=X`.
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.

    Returns
    -------
    D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \
            (n_samples_X, n_samples_Y)
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Notes
    -----
    When X and/or Y are CSR sparse matrices and they are not already
    in canonical format, this function modifies them in-place to
    make them canonical.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances([[3]], [[3]])
    array([[0.]])
    >>> manhattan_distances([[3]], [[2]])
    array([[1.]])
    >>> manhattan_distances([[1, 2], [3, 4]], [[1, 2], [0, 3]])
    array([[0., 2.],
           [4., 4.]])
    """
    X, Y = check_pairwise_arrays(X, Y)
    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError(
                "sum_over_features=%r not supported for sparse matrices"
                % sum_over_features
            )
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        # Canonicalize in place: merges duplicate entries and sorts indices,
        # which the fast Cython routine below relies on.
        X.sum_duplicates()
        Y.sum_duplicates()
        result = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(
            X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, result
        )
        return result
    if sum_over_features:
        # Dense pairwise L1 distances are delegated to scipy's C routine.
        return distance.cdist(X, Y, "cityblock")
    # Componentwise: broadcast to (n_X, n_Y, n_features), take absolute
    # differences, then collapse the first two axes.
    componentwise = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
    return componentwise.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        Matrix `X`.
    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        Matrix `Y`.

    Returns
    -------
    distance matrix : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    cosine_similarity
    scipy.spatial.distance.cosine : Dense matrices only.
    """
    # Evaluate 1.0 - cosine_similarity(X, Y) in place to avoid a second
    # allocation of the full matrix.
    dist = cosine_similarity(X, Y)
    dist *= -1
    dist += 1
    # Numerical round-off can push values slightly outside [0, 2].
    np.clip(dist, 0, 2, out=dist)
    if Y is None or X is Y:
        # Self-distances must be exactly zero despite floating point
        # rounding errors.
        dist[np.diag_indices_from(dist)] = 0.0
    return dist
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the paired euclidean distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
    Y : array-like of shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray of shape (n_samples,)
        `distances[i]` is the euclidean distance between `X[i]` and `Y[i]`.
    """
    X, Y = check_paired_arrays(X, Y)
    differences = X - Y
    return row_norms(differences)
def paired_manhattan_distances(X, Y):
    """Compute the paired L1 (Manhattan) distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
    Y : array-like of shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray of shape (n_samples,)
        `distances[i]` is the L1 distance between `X[i]` and `Y[i]`.
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if not issparse(delta):
        return np.abs(delta).sum(axis=-1)
    # Sparse difference: take |.| on stored values only, then reduce rows.
    delta.data = np.abs(delta.data)
    return np.squeeze(np.array(delta.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """Compute the paired cosine distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        An array where each row is a sample and each column is a feature.
    Y : array-like of shape (n_samples, n_features)
        An array where each row is a sample and each column is a feature.

    Returns
    -------
    distances : ndarray of shape (n_samples,)
        Returns the distances between the row vectors of `X`
        and the row vectors of `Y`, where `distances[i]` is the
        distance between `X[i]` and `Y[i]`.

    Notes
    -----
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm.
    """
    X, Y = check_paired_arrays(X, Y)
    # 1 - cos(x, y) == 0.5 * ||x/|x| - y/|y|||^2
    unit_diff = normalize(X) - normalize(Y)
    return 0.5 * row_norms(unit_diff, squared=True)
# Mapping from metric name to its paired-distance implementation, used by
# `paired_distances` for string dispatch. "l2"/"euclidean" and
# "l1"/"manhattan"/"cityblock" are aliases of the same functions.
PAIRED_DISTANCES = {
    "cosine": paired_cosine_distances,
    "euclidean": paired_euclidean_distances,
    "l2": paired_euclidean_distances,
    "l1": paired_manhattan_distances,
    "manhattan": paired_manhattan_distances,
    "cityblock": paired_manhattan_distances,
}
def paired_distances(X, Y, *, metric="euclidean", **kwds):
    """Compute the paired distances between X and Y.

    Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Array 1 for distance computation.
    Y : ndarray of shape (n_samples, n_features)
        Array 2 for distance computation.
    metric : str or callable, default="euclidean"
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from `X` as input and return a value indicating
        the distance between them.
    **kwds : dict
        Unused parameters.

    Returns
    -------
    distances : ndarray of shape (n_samples,)
        Returns the distances between the row vectors of `X`
        and the row vectors of `Y`.

    See Also
    --------
    pairwise_distances : Computes the distance between every pair of samples.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([0., 1.])
    """
    if metric in PAIRED_DISTANCES:
        return PAIRED_DISTANCES[metric](X, Y)
    if callable(metric):
        # Validate the arrays here; the named metrics above do their own
        # validation internally.
        X, Y = check_paired_arrays(X, Y)
        result = np.zeros(len(X))
        for i, (x, y) in enumerate(zip(X, Y)):
            result[i] = metric(x, y)
        return result
    raise ValueError("Unknown distance %s" % metric)
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
    """Compute the linear kernel between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)
        A feature array.
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        An optional second feature array. If `None`, uses `Y=X`.
    dense_output : bool, default=True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.20

    Returns
    -------
    Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
        The Gram matrix of the linear kernel, i.e. `X @ Y.T`.
    """
    X, Y = check_pairwise_arrays(X, Y)
    gram = safe_sparse_dot(X, Y.T, dense_output=dense_output)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0) ** degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)
    Y : ndarray of shape (n_samples_Y, n_features), default=None
    degree : int, default=3
    gamma : float, default=None
        If None, defaults to 1.0 / n_features.
    coef0 : float, default=1

    Returns
    -------
    Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    # Apply (gamma * <X, Y> + coef0) ** degree entirely in place.
    kernel *= gamma
    kernel += coef0
    kernel **= degree
    return kernel
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.
    gamma : float, default=None
        If None, defaults to 1.0 / n_features.
    coef0 : float, default=1

    Returns
    -------
    Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    # Apply tanh(gamma * <X, Y> + coef0) entirely in place.
    kernel *= gamma
    kernel += coef0
    np.tanh(kernel, kernel)
    return kernel
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.
    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    kernel = euclidean_distances(X, Y, squared=True)
    # exp(-gamma * ||x - y||^2), computed in place on the distance matrix.
    kernel *= -gamma
    np.exp(kernel, kernel)
    return kernel
def laplacian_kernel(X, Y=None, gamma=None):
    """Compute the laplacian kernel between X and Y.

    The laplacian kernel is defined as::

        K(x, y) = exp(-gamma ||x-y||_1)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <laplacian_kernel>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.
    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    kernel = -gamma * manhattan_distances(X, Y)
    # exp(-gamma * ||x - y||_1), computed in place.
    np.exp(kernel, kernel)
    return kernel
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
        Input data.
    Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.
    dense_output : bool, default=True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.17
           parameter ``dense_output`` for dense output.

    Returns
    -------
    kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    X_normalized = normalize(X, copy=True)
    # Reuse the normalized X when both operands are the same array.
    Y_normalized = X_normalized if X is Y else normalize(Y, copy=True)
    return safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Compute the additive chi-squared kernel between observations in X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
    """
    # The fast Cython routine below only handles dense arrays.
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")
    out = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    # Filled in place by the compiled helper.
    _chi2_kernel_fast(X, Y, out)
    return out
def chi2_kernel(X, Y=None, gamma=1.0):
    """Compute the exponential chi-squared kernel between X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : ndarray of shape (n_samples_Y, n_features), default=None
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    additive_chi2_kernel : The additive version of this kernel.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
    """
    kernel = additive_chi2_kernel(X, Y)
    # exp(gamma * additive_chi2), computed in place (additive form is <= 0).
    kernel *= gamma
    np.exp(kernel, out=kernel)
    return kernel
# Helper functions - distance
#
# Mapping from metric name to pairwise-distance implementation, consumed by
# `pairwise_distances` and exposed through `distance_metrics()`.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    "cityblock": manhattan_distances,
    "cosine": cosine_distances,
    "euclidean": euclidean_distances,
    "haversine": haversine_distances,
    "l2": euclidean_distances,
    "l1": manhattan_distances,
    "manhattan": manhattan_distances,
    "precomputed": None,  # HACK: precomputed is always allowed, never called
    "nan_euclidean": nan_euclidean_distances,
}
def distance_metrics():
    """Valid metrics for pairwise_distances.

    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    =============== ========================================
    metric          Function
    =============== ========================================
    'cityblock'     metrics.pairwise.manhattan_distances
    'cosine'        metrics.pairwise.cosine_distances
    'euclidean'     metrics.pairwise.euclidean_distances
    'haversine'     metrics.pairwise.haversine_distances
    'l1'            metrics.pairwise.manhattan_distances
    'l2'            metrics.pairwise.euclidean_distances
    'manhattan'     metrics.pairwise.manhattan_distances
    'nan_euclidean' metrics.pairwise.nan_euclidean_distances
    =============== ========================================

    Read more in the :ref:`User Guide <metrics>`.

    Returns
    -------
    distance_metrics : dict
        The module-level name-to-function mapping (not a copy).
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):
"""Write in-place to a slice of a distance matrix."""
dist_matrix[:, slice_] = dist_func(*args, **kwargs)
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix in n_jobs even slices
    and compute them in parallel.

    ``func`` is applied to ``(X, Y[s])`` for each column slice ``s``; each
    worker writes its result into a shared output array via _dist_wrapper.
    """
    if Y is None:
        Y = X
    X, Y, dtype = _return_float_dtype(X, Y)
    if effective_n_jobs(n_jobs) == 1:
        # Serial fast path: no slicing, no thread pool overhead.
        return func(X, Y, **kwds)
    # enforce a threading backend to prevent data communication overhead
    fd = delayed(_dist_wrapper)
    # Fortran order so each worker writes a contiguous column slice.
    ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order="F")
    Parallel(backend="threading", n_jobs=n_jobs)(
        fd(func, ret, s, X, Y[s], **kwds)
        for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs))
    )
    if (X is Y or Y is None) and func is euclidean_distances:
        # zeroing diagonal for euclidean norm.
        # TODO: do it also for other norms.
        np.fill_diagonal(ret, 0)
    return ret
def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):
    """Handle the callable case for pairwise_{distances,kernels}.

    ``metric`` is invoked on every pair of rows; when X is Y only the upper
    triangle is computed and the matrix is then symmetrized.
    """
    X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
    n_x, n_y = X.shape[0], Y.shape[0]
    if X is Y:
        # Evaluate the metric only on the strict upper triangle.
        out = np.zeros((n_x, n_y), dtype="float")
        for i, j in itertools.combinations(range(n_x), 2):
            out[i, j] = metric(X[i], Y[j], **kwds)
        # Mirror into the lower triangle. Note that an in-place
        # ``out += out.T`` would alias and produce incorrect results.
        out = out + out.T
        # Fill the diagonal explicitly: nonzero diagonals are allowed
        # for both metrics and kernels.
        for i in range(n_x):
            row = X[i]
            out[i, i] = metric(row, row, **kwds)
    else:
        # No symmetry to exploit: evaluate every cell.
        out = np.empty((n_x, n_y), dtype="float")
        for i in range(n_x):
            for j in range(n_y):
                out[i, j] = metric(X[i], Y[j], **kwds)
    return out
# Metric names accepted by pairwise_distances(): the scikit-learn
# implementations in PAIRWISE_DISTANCE_FUNCTIONS plus the names delegated
# to scipy.spatial.distance.
_VALID_METRICS = [
    "euclidean",
    "l2",
    "l1",
    "manhattan",
    "cityblock",
    "braycurtis",
    "canberra",
    "chebyshev",
    "correlation",
    "cosine",
    "dice",
    "hamming",
    "jaccard",
    "kulsinski",
    "mahalanobis",
    "matching",
    "minkowski",
    "rogerstanimoto",
    "russellrao",
    "seuclidean",
    "sokalmichener",
    "sokalsneath",
    "sqeuclidean",
    "yule",
    "wminkowski",
    "nan_euclidean",
    "haversine",
]
# Metrics that tolerate missing values encoded as NaN.
_NAN_METRICS = ["nan_euclidean"]
def _check_chunk_size(reduced, chunk_size):
"""Checks chunk is a sequence of expected size or a tuple of same."""
if reduced is None:
return
is_tuple = isinstance(reduced, tuple)
if not is_tuple:
reduced = (reduced,)
if any(isinstance(r, tuple) or not hasattr(r, "__iter__") for r in reduced):
raise TypeError(
"reduce_func returned %r. Expected sequence(s) of length %d."
% (reduced if is_tuple else reduced[0], chunk_size)
)
if any(_num_samples(r) != chunk_size for r in reduced):
actual_size = tuple(_num_samples(r) for r in reduced)
raise ValueError(
"reduce_func returned object of length %s. "
"Expected same length as input: %d."
% (actual_size if is_tuple else actual_size[0], chunk_size)
)
def _precompute_metric_params(X, Y, metric=None, **kwds):
"""Precompute data-derived metric parameters if not provided."""
if metric == "seuclidean" and "V" not in kwds:
# There is a bug in scipy < 1.5 that will cause a crash if
# X.dtype != np.double (float64). See PR #15730
dtype = np.float64 if sp_version < parse_version("1.5") else None
if X is Y:
V = np.var(X, axis=0, ddof=1, dtype=dtype)
else:
raise ValueError(
"The 'V' parameter is required for the seuclidean metric "
"when Y is passed."
)
return {"V": V}
if metric == "mahalanobis" and "VI" not in kwds:
if X is Y:
VI = np.linalg.inv(np.cov(X.T)).T
else:
raise ValueError(
"The 'VI' parameter is required for the mahalanobis metric "
"when Y is passed."
)
return {"VI": VI}
return {}
def pairwise_distances_chunked(
    X,
    Y=None,
    *,
    reduce_func=None,
    metric="euclidean",
    n_jobs=None,
    working_memory=None,
    **kwds,
):
    """Generate a distance matrix chunk by chunk with optional reduction.
    In cases where not all of a pairwise distance matrix needs to be stored at
    once, this is used to calculate pairwise distances in
    ``working_memory``-sized chunks. If ``reduce_func`` is given, it is run
    on each chunk and its return values are concatenated into lists, arrays
    or sparse matrices.
    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_features)
        Array of pairwise distances between samples, or a feature array.
        The shape the array should be (n_samples_X, n_samples_X) if
        metric='precomputed' and (n_samples_X, n_features) otherwise.
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        An optional second feature array. Only allowed if
        metric != "precomputed".
    reduce_func : callable, default=None
        The function which is applied on each chunk of the distance matrix,
        reducing it to needed values.  ``reduce_func(D_chunk, start)``
        is called repeatedly, where ``D_chunk`` is a contiguous vertical
        slice of the pairwise distance matrix, starting at row ``start``.
        It should return one of: None; an array, a list, or a sparse matrix
        of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning
        None is useful for in-place operations, rather than reductions.
        If None, pairwise_distances_chunked returns a generator of vertical
        chunks of the distance matrix.
    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    working_memory : int, default=None
        The sought maximum memory for temporary distance matrix chunks.
        When None (default), the value of
        ``sklearn.get_config()['working_memory']`` is used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Yields
    ------
    D_chunk : {ndarray, sparse matrix}
        A contiguous slice of distance matrix, optionally processed by
        ``reduce_func``.
    Examples
    --------
    Without reduce_func:
    >>> import numpy as np
    >>> from sklearn.metrics import pairwise_distances_chunked
    >>> X = np.random.RandomState(0).rand(5, 3)
    >>> D_chunk = next(pairwise_distances_chunked(X))
    >>> D_chunk
    array([[0.  ..., 0.29..., 0.41..., 0.19..., 0.57...],
           [0.29..., 0.  ..., 0.57..., 0.41..., 0.76...],
           [0.41..., 0.57..., 0.  ..., 0.44..., 0.90...],
           [0.19..., 0.41..., 0.44..., 0.  ..., 0.51...],
           [0.57..., 0.76..., 0.90..., 0.51..., 0.  ...]])
    Retrieve all neighbors and average distance within radius r:
    >>> r = .2
    >>> def reduce_func(D_chunk, start):
    ...     neigh = [np.flatnonzero(d < r) for d in D_chunk]
    ...     avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
    ...     return neigh, avg_dist
    >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
    >>> neigh, avg_dist = next(gen)
    >>> neigh
    [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
    >>> avg_dist
    array([0.039..., 0.        , 0.        , 0.039..., 0.        ])
    Where r is defined per sample, we need to make use of ``start``:
    >>> r = [.2, .4, .4, .3, .1]
    >>> def reduce_func(D_chunk, start):
    ...     neigh = [np.flatnonzero(d < r[i])
    ...              for i, d in enumerate(D_chunk, start)]
    ...     return neigh
    >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
    >>> neigh
    [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]
    Force row-by-row generation by reducing ``working_memory``:
    >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
    ...                                  working_memory=0)
    >>> next(gen)
    [array([0, 3])]
    >>> next(gen)
    [array([0, 1])]
    """
    n_samples_X = _num_samples(X)
    if metric == "precomputed":
        # The distance matrix already exists: yield it in one slice.
        slices = (slice(0, n_samples_X),)
    else:
        if Y is None:
            Y = X
        # We get as many rows as possible within our working_memory budget to
        # store len(Y) distances in each row of output.
        #
        # Note:
        #  - this will get at least 1 row, even if 1 row of distances will
        #    exceed working_memory.
        #  - this does not account for any temporary memory usage while
        #    calculating distances (e.g. difference of vectors in manhattan
        #    distance.
        chunk_n_rows = get_chunk_n_rows(
            row_bytes=8 * _num_samples(Y),
            max_n_rows=n_samples_X,
            working_memory=working_memory,
        )
        slices = gen_batches(n_samples_X, chunk_n_rows)
    # precompute data-derived metric params
    params = _precompute_metric_params(X, Y, metric=metric, **kwds)
    kwds.update(**params)
    for sl in slices:
        if sl.start == 0 and sl.stop == n_samples_X:
            X_chunk = X  # enable optimised paths for X is Y
        else:
            X_chunk = X[sl]
        D_chunk = pairwise_distances(X_chunk, Y, metric=metric, n_jobs=n_jobs, **kwds)
        if (X is Y or Y is None) and PAIRWISE_DISTANCE_FUNCTIONS.get(
            metric, None
        ) is euclidean_distances:
            # zeroing diagonal, taking care of aliases of "euclidean",
            # i.e. "l2"
            D_chunk.flat[sl.start :: _num_samples(X) + 1] = 0
        if reduce_func is not None:
            # Validate that reduce_func returned one entry per chunk row.
            chunk_size = D_chunk.shape[0]
            D_chunk = reduce_func(D_chunk, sl.start)
            _check_chunk_size(D_chunk, chunk_size)
        yield D_chunk
def pairwise_distances(
    X, Y=None, metric="euclidean", *, n_jobs=None, force_all_finite=True, **kwds
):
    """Compute the distance matrix from a vector array X and optional Y.
    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.
    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix
      inputs.
      ['nan_euclidean'] but it does not yet support sparse matrices.
    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.
    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_features)
        Array of pairwise distances between samples, or a feature array.
        The shape of the array should be (n_samples_X, n_samples_X) if
        metric == "precomputed" and (n_samples_X, n_features) otherwise.
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        An optional second feature array. Only allowed if
        metric != "precomputed".
    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored
        for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The
        possibilities are:
        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.
        .. versionadded:: 0.22
           ``force_all_finite`` accepts the string ``'allow-nan'``.
        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    D : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_samples_Y)
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    See Also
    --------
    pairwise_distances_chunked : Performs the same calculation as this
        function, but returns a generator of chunks of the distance matrix, in
        order to limit memory usage.
    paired_distances : Computes the distances between corresponding elements
        of two arrays.
    """
    if (
        metric not in _VALID_METRICS
        and not callable(metric)
        and metric != "precomputed"
    ):
        raise ValueError(
            "Unknown metric %s. Valid metrics are %s, or 'precomputed', or a callable"
            % (metric, _VALID_METRICS)
        )
    if metric == "precomputed":
        # X already holds the distances: validate and return it unchanged.
        X, _ = check_pairwise_arrays(
            X, Y, precomputed=True, force_all_finite=force_all_finite
        )
        whom = (
            "`pairwise_distances`. Precomputed distance "
            " need to have non-negative values."
        )
        check_non_negative(X, whom=whom)
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation (supports sparse input).
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(
            _pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds
        )
    else:
        # Delegate to scipy.spatial.distance (dense input only).
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not support sparse matrices.")
        dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
        if dtype == bool and (X.dtype != bool or (Y is not None and Y.dtype != bool)):
            msg = "Data was converted to boolean for metric %s" % metric
            warnings.warn(msg, DataConversionWarning)
        X, Y = check_pairwise_arrays(
            X, Y, dtype=dtype, force_all_finite=force_all_finite
        )
        # precompute data-derived metric params
        params = _precompute_metric_params(X, Y, metric=metric, **kwds)
        kwds.update(**params)
        if effective_n_jobs(n_jobs) == 1 and X is Y:
            # Symmetric single-job case: pdist computes half the matrix.
            return distance.squareform(distance.pdist(X, metric=metric, **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# These distances require boolean arrays, when using scipy.spatial.distance
# pairwise_distances() converts inputs to bool (with a DataConversionWarning)
# before delegating these metrics to scipy.
PAIRWISE_BOOLEAN_FUNCTIONS = [
    "dice",
    "jaccard",
    "kulsinski",
    "matching",
    "rogerstanimoto",
    "russellrao",
    "sokalmichener",
    "sokalsneath",
    "yule",
]
# Helper functions - kernels
# Mapping from kernel name to the scikit-learn kernel implementation used
# by pairwise_kernels(); kernel_metrics() exposes it verbatim.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    "additive_chi2": additive_chi2_kernel,
    "chi2": chi2_kernel,
    "linear": linear_kernel,
    "polynomial": polynomial_kernel,
    "poly": polynomial_kernel,  # alias of "polynomial"
    "rbf": rbf_kernel,
    "laplacian": laplacian_kernel,
    "sigmoid": sigmoid_kernel,
    "cosine": cosine_similarity,
}
def kernel_metrics():
    """Valid metrics for pairwise_kernels.
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
      ===============   ========================================
      metric            Function
      ===============   ========================================
      'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
      'chi2'            sklearn.pairwise.chi2_kernel
      'linear'          sklearn.pairwise.linear_kernel
      'poly'            sklearn.pairwise.polynomial_kernel
      'polynomial'      sklearn.pairwise.polynomial_kernel
      'rbf'             sklearn.pairwise.rbf_kernel
      'laplacian'       sklearn.pairwise.laplacian_kernel
      'sigmoid'         sklearn.pairwise.sigmoid_kernel
      'cosine'          sklearn.pairwise.cosine_similarity
      ===============   ========================================
    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# For each kernel name, the keyword arguments pairwise_kernels() may forward
# to the kernel function when ``filter_params=True``.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "laplacian": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(
    X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds
):
    """Compute the kernel between arrays X and optional array Y.
    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.
    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    kernel between the arrays from both X and Y.
    Valid values for metric are:
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
        'laplacian', 'sigmoid', 'cosine']
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_features)
        Array of pairwise kernels between samples, or a feature array.
        The shape of the array should be (n_samples_X, n_samples_X) if
        metric == "precomputed" and (n_samples_X, n_features) otherwise.
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        A second feature array only if X has shape (n_samples_X, n_features).
    metric : str or callable, default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two rows from X as input and return the corresponding
        kernel value as a single number. This means that callables from
        :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
        matrices, not single samples. Use the string identifying the kernel
        instead.
    filter_params : bool, default=False
        Whether to filter invalid parameters or not.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the kernel function.
    Returns
    -------
    K : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_samples_Y)
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.
    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    # import GPKernel locally to prevent circular imports
    from ..gaussian_process.kernels import Kernel as GPKernel

    if metric == "precomputed":
        # X already holds the kernel values: validate and return as-is.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X
    elif isinstance(metric, GPKernel):
        func = metric.__call__
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Drop any kwarg the chosen kernel does not accept.
            kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
manhhomienbienthuy/scikit-learn
|
sklearn/metrics/pairwise.py
|
Python
|
bsd-3-clause
| 73,354
|
[
"Gaussian"
] |
e12565a397f86ef67c86258997023cc4bcb9219cb983a94d4969007d71d4a326
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the courseware unit bookmarks.
"""
import json
from nose.plugins.attrib import attr
import requests
from ...pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ...pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from ...pages.lms.bookmarks import BookmarksPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.common.logout import LogoutPage
from ...pages.common import BASE_URL
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import EventsTestMixin, UniqueCourseTest, is_404_page
class BookmarksTestMixin(EventsTestMixin, UniqueCourseTest):
    """
    Mixin with helper methods for testing Bookmarks.
    """
    USERNAME = "STUDENT"
    EMAIL = "student@example.com"

    def create_course_fixture(self, num_chapters):
        """
        Create a course fixture with the requested number of chapters.

        Arguments:
            num_chapters: number of chapters to create
        """
        self.course_fixture = CourseFixture(  # pylint: disable=attribute-defined-outside-init
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Build one chapter -> subsection -> vertical chain per chapter.
        xblocks = [
            XBlockFixtureDesc('chapter', 'TestSection{}'.format(index)).add_children(
                XBlockFixtureDesc('sequential', 'TestSubsection{}'.format(index)).add_children(
                    XBlockFixtureDesc('vertical', 'TestVertical{}'.format(index))
                )
            )
            for index in range(num_chapters)
        ]
        self.course_fixture.add_children(*xblocks).install()

    def verify_event_data(self, event_type, event_data):
        """
        Verify emitted event data.

        Arguments:
            event_type: expected event type
            event_data: expected event data
        """
        matched_events = self.wait_for_events(event_filter={'event_type': event_type}, number_of_matches=1)
        self.assert_events_match(event_data, matched_events)
@attr('shard_8')
class BookmarksTest(BookmarksTestMixin):
"""
Tests to verify bookmarks functionality.
"""
    def setUp(self):
        """
        Initialize test setup.
        """
        super(BookmarksTest, self).setUp()
        # Studio page object, used when staff edits to the course are needed.
        self.course_outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # LMS page objects exercised by these tests.
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.bookmarks_page = BookmarksPage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        # Get session to be used for bookmarking units
        self.session = requests.Session()
        params = {'username': self.USERNAME, 'email': self.EMAIL, 'course_id': self.course_id}
        response = self.session.get(BASE_URL + "/auto_auth", params=params)
        self.assertTrue(response.ok, "Failed to get session")
    def _test_setup(self, num_chapters=2):
        """
        Setup test settings: create the course fixture, register, and open
        the courseware page.
        Arguments:
            num_chapters: number of chapters to create in course
        """
        self.create_course_fixture(num_chapters)
        # Auto-auth register for the course.
        LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
        self.courseware_page.visit()
def _bookmark_unit(self, location):
"""
Bookmark a unit
Arguments:
location (str): unit location
"""
_headers = {
'Content-type': 'application/json',
'X-CSRFToken': self.session.cookies['csrftoken'],
}
params = {'course_id': self.course_id}
data = json.dumps({'usage_id': location})
response = self.session.post(
BASE_URL + '/api/bookmarks/v1/bookmarks/',
data=data,
params=params,
headers=_headers
)
self.assertTrue(response.ok, "Failed to bookmark unit")
def _bookmark_units(self, num_units):
"""
Bookmark first `num_units` units
Arguments:
num_units(int): Number of units to bookmarks
"""
xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
for index in range(num_units):
self._bookmark_unit(xblocks[index].locator)
def _breadcrumb(self, num_units, modified_name=None):
"""
Creates breadcrumbs for the first `num_units`
Arguments:
num_units(int): Number of units for which we want to create breadcrumbs
Returns:
list of breadcrumbs
"""
breadcrumbs = []
for index in range(num_units):
breadcrumbs.append(
[
'TestSection{}'.format(index),
'TestSubsection{}'.format(index),
modified_name if modified_name else 'TestVertical{}'.format(index)
]
)
return breadcrumbs
    def _delete_section(self, index):
        """ Delete the section at `index` via Studio, then return to the LMS as a student. """
        # Logout and login as staff
        LogoutPage(self.browser).visit()
        StudioAutoAuthPage(
            self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id, staff=True
        ).visit()
        # Visit course outline page in studio.
        self.course_outline_page.visit()
        self.course_outline_page.wait_for_page()
        self.course_outline_page.section_at(index).delete()
        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
        # Visit courseware as a student.
        self.courseware_page.visit()
        self.courseware_page.wait_for_page()
    def _toggle_bookmark_and_verify(self, bookmark_icon_state, bookmark_button_state, bookmarked_count):
        """
        Bookmark/Un-Bookmark a unit and then verify the icon visibility,
        the button state, and the number of entries on the bookmarks list.
        """
        self.assertTrue(self.courseware_page.bookmark_button_visible)
        self.courseware_page.click_bookmark_unit_button()
        self.assertEqual(self.courseware_page.bookmark_icon_visible, bookmark_icon_state)
        self.assertEqual(self.courseware_page.bookmark_button_state, bookmark_button_state)
        self.bookmarks_page.click_bookmarks_button()
        self.assertEqual(self.bookmarks_page.count(), bookmarked_count)
    def _verify_pagination_info(
            self,
            bookmark_count_on_current_page,
            header_text,
            previous_button_enabled,
            next_button_enabled,
            current_page_number,
            total_pages
    ):
        """
        Verify pagination info: item count, header text, prev/next button
        states, current page number and total page count.
        """
        self.assertEqual(self.bookmarks_page.count(), bookmark_count_on_current_page)
        self.assertEqual(self.bookmarks_page.get_pagination_header_text(), header_text)
        self.assertEqual(self.bookmarks_page.is_previous_page_button_enabled(), previous_button_enabled)
        self.assertEqual(self.bookmarks_page.is_next_page_button_enabled(), next_button_enabled)
        self.assertEqual(self.bookmarks_page.get_current_page_number(), current_page_number)
        # NOTE(review): no parentheses here, unlike the accessors above --
        # presumably `get_total_pages` is a property on the page object;
        # confirm, otherwise this compares a bound method to an int and
        # would always fail.
        self.assertEqual(self.bookmarks_page.get_total_pages, total_pages)
    def _navigate_to_bookmarks_list(self):
        """
        Navigates to the bookmarks list page and verifies that results are
        shown under the expected header.
        """
        self.bookmarks_page.click_bookmarks_button()
        self.assertTrue(self.bookmarks_page.results_present())
        self.assertEqual(self.bookmarks_page.results_header_text(), 'My Bookmarks')
def _verify_breadcrumbs(self, num_units, modified_name=None):
"""
Verifies the breadcrumb trail.
"""
bookmarked_breadcrumbs = self.bookmarks_page.breadcrumbs()
# Verify bookmarked breadcrumbs.
breadcrumbs = self._breadcrumb(num_units=num_units, modified_name=modified_name)
breadcrumbs.reverse()
self.assertEqual(bookmarked_breadcrumbs, breadcrumbs)
    def update_and_publish_block_display_name(self, modified_name):
        """
        Update and publish the block/unit display name.
        """
        self.course_outline_page.visit()
        self.course_outline_page.wait_for_page()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        container_page = section.subsection_at(0).unit_at(0).go_to()
        # Update the display name through the fixture API rather than the UI.
        self.course_fixture._update_xblock(container_page.locator, {  # pylint: disable=protected-access
            "metadata": {
                "display_name": modified_name
            }
        })
        container_page.visit()
        container_page.wait_for_page()
        self.assertEqual(container_page.name, modified_name)
        # Publish so the LMS sees the renamed unit.
        container_page.publish_action.click()
    def test_bookmark_button(self):
        """
        Scenario: Bookmark unit button toggles correctly
        Given that I am a registered user
        And I visit my courseware page
        For first 2 units
            I visit the unit
            And I can see the Bookmark button
            When I click on Bookmark button
            Then unit should be bookmarked
            Then I click again on the bookmark button
            And I should see a unit un-bookmarked
        """
        self._test_setup()
        for index in range(2):
            self.course_nav.go_to_section('TestSection{}'.format(index), 'TestSubsection{}'.format(index))
            # Bookmark: icon shown, button reads 'bookmarked', one list entry.
            self._toggle_bookmark_and_verify(True, 'bookmarked', 1)
            # NOTE(review): presumably the False argument returns from the
            # bookmarks list back to the unit view -- confirm on page object.
            self.bookmarks_page.click_bookmarks_button(False)
            self._toggle_bookmark_and_verify(False, '', 0)
    def test_empty_bookmarks_list(self):
        """
        Scenario: An empty bookmarks list is shown if there are no bookmarked units.
        Given that I am a registered user
        And I visit my courseware page
        And I can see the Bookmarks button
        When I click on Bookmarks button
        Then I should see an empty bookmarks list
        And empty bookmarks list content is correct
        """
        self._test_setup()
        self.assertTrue(self.bookmarks_page.bookmarks_button_visible())
        self.bookmarks_page.click_bookmarks_button()
        self.assertEqual(self.bookmarks_page.results_header_text(), 'My Bookmarks')
        self.assertEqual(self.bookmarks_page.empty_header_text(), 'You have not bookmarked any courseware pages yet.')
        # The empty-state help text shown in place of the bookmarks list.
        empty_list_text = ("Use bookmarks to help you easily return to courseware pages. To bookmark a page, "
                           "select Bookmark in the upper right corner of that page. To see a list of all your "
                           "bookmarks, select Bookmarks in the upper left corner of any courseware page.")
        self.assertEqual(self.bookmarks_page.empty_list_text(), empty_list_text)
    def test_bookmarks_list(self):
        """
        Scenario: A bookmarks list is shown if there are bookmarked units.
        Given that I am a registered user
        And I visit my courseware page
        And I have bookmarked 2 units
        When I click on Bookmarks button
        Then I should see a bookmarked list with 2 bookmark links
        And breadcrumb trail is correct for a bookmark
        When I click on bookmarked link
        Then I can navigate to correct bookmarked unit
        """
        self._test_setup()
        self._bookmark_units(2)
        self._navigate_to_bookmarks_list()
        self._verify_breadcrumbs(num_units=2)
        self._verify_pagination_info(
            bookmark_count_on_current_page=2,
            header_text='Showing 1-2 out of 2 total',
            previous_button_enabled=False,
            next_button_enabled=False,
            current_page_number=1,
            total_pages=1
        )
        # get usage ids for units
        xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
        xblock_usage_ids = [xblock.locator for xblock in xblocks]
        # Verify link navigation
        for index in range(2):
            self.bookmarks_page.click_bookmarked_block(index)
            self.courseware_page.wait_for_page()
            # Each bookmark link must land on one of the bookmarked units.
            self.assertIn(self.courseware_page.active_usage_id(), xblock_usage_ids)
            # Return to the bookmarks list before following the next link.
            self.courseware_page.visit().wait_for_page()
            self.bookmarks_page.click_bookmarks_button()
    def test_bookmark_shows_updated_breadcrumb_after_publish(self):
        """
        Scenario: A bookmark breadcrumb trail is updated after publishing the changed display name.
        Given that I am a registered user
        And I visit my courseware page
        And I can see bookmarked unit
        Then I visit unit page in studio
        Then I change unit display_name
        And I publish the changes
        Then I visit my courseware page
        And I visit bookmarks list page
        When I see the bookmark
        Then I can see the breadcrumb trail
        with updated display_name.
        """
        self._test_setup(num_chapters=1)
        self._bookmark_units(num_units=1)
        self._navigate_to_bookmarks_list()
        self._verify_breadcrumbs(num_units=1)
        # Switch to a staff session to edit the unit in Studio.
        LogoutPage(self.browser).visit()
        LmsAutoAuthPage(
            self.browser,
            username=self.USERNAME,
            email=self.EMAIL,
            course_id=self.course_id,
            staff=True
        ).visit()
        modified_name = "Updated name"
        self.update_and_publish_block_display_name(modified_name)
        # Switch back to the student session and re-check the breadcrumbs.
        LogoutPage(self.browser).visit()
        LmsAutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL, course_id=self.course_id).visit()
        self.courseware_page.visit()
        self._navigate_to_bookmarks_list()
        self._verify_breadcrumbs(num_units=1, modified_name=modified_name)
def test_unreachable_bookmark(self):
    """
    Scenario: We should get a HTTP 404 for an unreachable bookmark.
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked a unit
    Then I delete the section containing the bookmarked unit
    Then I click on Bookmarks button
    And I should see a bookmarked list
    When I click on the deleted bookmark
    Then I should be navigated to a 404 page
    """
    self._test_setup(num_chapters=1)
    self._bookmark_units(1)
    # Delete the section; the bookmark record itself is not removed,
    # leaving a stale entry in the bookmarks list.
    self._delete_section(0)
    self._navigate_to_bookmarks_list()
    self._verify_pagination_info(
        bookmark_count_on_current_page=1,
        header_text='Showing 1 out of 1 total',
        previous_button_enabled=False,
        next_button_enabled=False,
        current_page_number=1,
        total_pages=1
    )
    # Following the stale bookmark must land on the 404 page.
    self.bookmarks_page.click_bookmarked_block(0)
    self.assertTrue(is_404_page(self.browser))
def test_page_size_limit(self):
    """
    Scenario: We can't get bookmarks more than default page size.
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked all the 11 units available
    Then I click on Bookmarks button
    And I should see a bookmarked list
    And bookmark list contains 10 bookmarked items
    """
    self._test_setup(11)
    self._bookmark_units(11)
    self._navigate_to_bookmarks_list()
    # Only 10 of the 11 bookmarks appear on page 1: 10 is the default page size.
    self._verify_pagination_info(
        bookmark_count_on_current_page=10,
        header_text='Showing 1-10 out of 11 total',
        previous_button_enabled=False,
        next_button_enabled=True,
        current_page_number=1,
        total_pages=2
    )
def test_pagination_with_single_page(self):
    """
    Scenario: Bookmarks list pagination is working as expected for single page
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked all the 2 units available
    Then I click on Bookmarks button
    And I should see a bookmarked list with 2 bookmarked items
    And I should see paging header and footer with correct data
    And previous and next buttons are disabled
    """
    self._test_setup(num_chapters=2)
    self._bookmark_units(num_units=2)
    self.bookmarks_page.click_bookmarks_button()
    self.assertTrue(self.bookmarks_page.results_present())
    # Both bookmarks fit on a single page, so both paging buttons stay disabled.
    self._verify_pagination_info(
        bookmark_count_on_current_page=2,
        header_text='Showing 1-2 out of 2 total',
        previous_button_enabled=False,
        next_button_enabled=False,
        current_page_number=1,
        total_pages=1
    )
def test_next_page_button(self):
    """
    Scenario: Next button is working as expected for bookmarks list pagination
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked all the 12 units available
    Then I click on Bookmarks button
    And I should see a bookmarked list of 10 items
    And I should see paging header and footer with correct info
    Then I click on next page button in footer
    And I should be navigated to second page
    And I should see a bookmarked list with 2 items
    And I should see paging header and footer with correct info
    """
    self._test_setup(num_chapters=12)
    self._bookmark_units(num_units=12)
    self.bookmarks_page.click_bookmarks_button()
    self.assertTrue(self.bookmarks_page.results_present())
    # Page 1: first 10 of 12 bookmarks (default page size is 10).
    self._verify_pagination_info(
        bookmark_count_on_current_page=10,
        header_text='Showing 1-10 out of 12 total',
        previous_button_enabled=False,
        next_button_enabled=True,
        current_page_number=1,
        total_pages=2
    )
    self.bookmarks_page.press_next_page_button()
    # Page 2: the remaining 2 bookmarks; "next" is now disabled.
    self._verify_pagination_info(
        bookmark_count_on_current_page=2,
        header_text='Showing 11-12 out of 12 total',
        previous_button_enabled=True,
        next_button_enabled=False,
        current_page_number=2,
        total_pages=2
    )
def test_previous_page_button(self):
    """
    Scenario: Previous button is working as expected for bookmarks list pagination
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked all the 12 units available
    And I click on Bookmarks button
    Then I click on next page button in footer
    And I should be navigated to second page
    And I should see a bookmarked list with 2 items
    And I should see paging header and footer with correct info
    Then I click on previous page button
    And I should be navigated to first page
    And I should see paging header and footer with correct info
    """
    self._test_setup(num_chapters=12)
    self._bookmark_units(num_units=12)
    self.bookmarks_page.click_bookmarks_button()
    self.assertTrue(self.bookmarks_page.results_present())
    # Advance to page 2 first so the "previous" button becomes enabled.
    self.bookmarks_page.press_next_page_button()
    self._verify_pagination_info(
        bookmark_count_on_current_page=2,
        header_text='Showing 11-12 out of 12 total',
        previous_button_enabled=True,
        next_button_enabled=False,
        current_page_number=2,
        total_pages=2
    )
    # Going back must restore the first-page view exactly.
    self.bookmarks_page.press_previous_page_button()
    self._verify_pagination_info(
        bookmark_count_on_current_page=10,
        header_text='Showing 1-10 out of 12 total',
        previous_button_enabled=False,
        next_button_enabled=True,
        current_page_number=1,
        total_pages=2
    )
def test_pagination_with_valid_page_number(self):
    """
    Scenario: Bookmarks list pagination works as expected for valid page number
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked all the 11 units available
    Then I click on Bookmarks button
    And I should see a bookmarked list
    And I should see total page value is 2
    Then I enter 2 in the page number input
    And I should be navigated to page 2
    """
    self._test_setup(num_chapters=11)
    self._bookmark_units(num_units=11)
    self.bookmarks_page.click_bookmarks_button()
    self.assertTrue(self.bookmarks_page.results_present())
    self.assertEqual(self.bookmarks_page.get_total_pages, 2)
    # Jump directly to page 2 via the page-number input.
    self.bookmarks_page.go_to_page(2)
    self._verify_pagination_info(
        bookmark_count_on_current_page=1,
        header_text='Showing 11-11 out of 11 total',
        previous_button_enabled=True,
        next_button_enabled=False,
        current_page_number=2,
        total_pages=2
    )
def test_pagination_with_invalid_page_number(self):
    """
    Scenario: Bookmarks list pagination works as expected for invalid page number
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked all the 11 units available
    Then I click on Bookmarks button
    And I should see a bookmarked list
    And I should see total page value is 2
    Then I enter 3 in the page number input
    And I should stay at page 1
    """
    self._test_setup(num_chapters=11)
    self._bookmark_units(num_units=11)
    self.bookmarks_page.click_bookmarks_button()
    self.assertTrue(self.bookmarks_page.results_present())
    self.assertEqual(self.bookmarks_page.get_total_pages, 2)
    # Page 3 does not exist (only 2 pages); the view must stay on page 1.
    self.bookmarks_page.go_to_page(3)
    self._verify_pagination_info(
        bookmark_count_on_current_page=10,
        header_text='Showing 1-10 out of 11 total',
        previous_button_enabled=False,
        next_button_enabled=True,
        current_page_number=1,
        total_pages=2
    )
def test_bookmarked_unit_accessed_event(self):
    """
    Scenario: Bookmark events are emitted with correct data when we access/visit a bookmarked unit.
    Given that I am a registered user
    And I visit my courseware page
    And I have bookmarked a unit
    When I click on bookmarked unit
    Then `edx.bookmark.accessed` event is emitted
    """
    self._test_setup(num_chapters=1)
    self.reset_event_tracking()
    # Create expected event data: the bookmark_id is "<username>,<usage_id>".
    xblocks = self.course_fixture.get_nested_xblocks(category="vertical")
    event_data = [
        {
            'event': {
                'bookmark_id': '{},{}'.format(self.USERNAME, xblocks[0].locator),
                'component_type': xblocks[0].category,
                'component_usage_id': xblocks[0].locator,
            }
        }
    ]
    self._bookmark_units(num_units=1)
    self.bookmarks_page.click_bookmarks_button()
    self._verify_pagination_info(
        bookmark_count_on_current_page=1,
        header_text='Showing 1 out of 1 total',
        previous_button_enabled=False,
        next_button_enabled=False,
        current_page_number=1,
        total_pages=1
    )
    self.bookmarks_page.click_bookmarked_block(0)
    # NOTE(review): the original docstring named the event
    # 'edx.course.bookmark.accessed' while the assertion below uses
    # 'edx.bookmark.accessed' — confirm which name the platform emits.
    self.verify_event_data('edx.bookmark.accessed', event_data)
|
solashirai/edx-platform
|
common/test/acceptance/tests/lms/test_bookmarks.py
|
Python
|
agpl-3.0
| 23,733
|
[
"VisIt"
] |
6c3594bb0c82b416a187a26967d4a5237b201b498fa3a712f810dc92c588a838
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns ``(matched, unmatched)``: elements for which *predicate* is
    truthy, then those for which it is falsy, each in original order.
    """
    matched: List[Any] = []
    unmatched: List[Any] = []
    for item in iterator:
        (matched if predicate(item) else unmatched).append(item)
    return matched, unmatched
class eventarc_publishingCallTransformer(cst.CSTTransformer):
    """LibCST transformer that rewrites flattened API method calls into the
    single-``request``-dict calling convention, keeping retry/timeout/metadata
    as top-level keyword arguments."""

    # Control-plane parameters that must stay outside the request dict.
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
    # API method name -> ordered tuple of its request field names.
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
        'publish_channel_connection_events': ('channel_connection', 'events', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite ``client.m(a, b, retry=r)`` to
        ``client.m(request={'x': a, 'y': b}, retry=r)`` for known methods."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate control-plane kwargs (retry/timeout/metadata) from
        # request-field kwargs.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Positional args past the request fields are control params by position.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        # Assemble the new request={...} argument from the stripped fields.
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=eventarc_publishingCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    for root, _, filenames in os.walk(in_dir):
        for filename in filenames:
            if os.path.splitext(filename)[1] != ".py":
                continue
            source_path = pathlib.Path(os.path.join(root, filename))
            with open(source_path, 'r') as handle:
                original_source = handle.read()

            # Parse the code and insert method call fixes.
            fixed_module = cst.parse_module(original_source).visit(transformer)

            # Mirror the input's directory structure under out_dir.
            destination = out_dir.joinpath(source_path.relative_to(in_dir))
            destination.parent.mkdir(parents=True, exist_ok=True)

            # Generate the updated source file at the corresponding path.
            with open(destination, 'w') as handle:
                handle.write(fixed_module.code)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the eventarc_publishing client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Validate fix_files' preconditions up front: both paths must be real
    # directories and the output directory must be empty.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
|
googleapis/python-eventarc-publishing
|
scripts/fixup_eventarc_publishing_v1_keywords.py
|
Python
|
apache-2.0
| 6,018
|
[
"VisIt"
] |
20e4cf7b9ef443120d5f8f04d22ee11314c189449beafe67369c0fd8caabe1c4
|
'''
Code to generate LTSA for the car project
Brian Gravelle

Writes LTSA process-definition fragments (WAIT_RESPONSE, WAIT_PERMISSION,
CRITICAL, CLEANUP, SENDALL) for MAX_CARS cars into testfile.txt by
enumerating guarded transitions for every (car, other-car) pair.
'''

# Number of cars participating in the protocol; all generated guards are
# enumerated over car IDs 1..MAX_CARS.
MAX_CARS = 4

# NOTE(review): `file` shadows the Python 2 builtin of the same name.
file = open("testfile.txt","w")

# ---- WAIT_RESPONSE section ------------------------------------------------
file.write("wait response do not use!!")
file.write("\n")
file.write("\n")
file.write("\n")

# Template: while waiting for responses, grant permission to a
# lower-priority (higher ID) requester.
lower_priority = \
"""{2}receive_request[{0}][ID][Lanes] -> send_permission[ID][{0}] -> WAIT_RESPONSE{1}[ll2][ll3][ll4][l] |
"""
# Template: receive a permission from another car while waiting.
lower_priority1 = \
"""{2}receive_permission[{0}][ID] -> WAIT_RESPONSE{1}[ll2][ll3][ll4][l] |
"""

# Positional substitution patterns for the three "waiting-on" slots;
# index 0 is a placeholder so indices line up with car numbering.
low_list = ["", "[{}][hl3][hl4]","[hl2][{}][hl4]","[hl2][hl3][{}]"]

for i in range(1,MAX_CARS+1):
    file.write("\n")
    cnt = i
    cnt1 = 1
    when_str = "when (ID == {}) ".format(i)
    for j in range(1,MAX_CARS+1):
        if i < j:
            file.write(lower_priority.format(j, low_list[cnt].format("True"), when_str))
            cnt+=1
        if i != j:
            file.write(lower_priority1.format(j, low_list[cnt1].format("False"), when_str))
            cnt1+=1
file.write("\nwhen(!hl2 && !hl3 && !hl4) time_out[ID] -> WAIT_PERMISSION[hl2][hl3][hl4][ll2][ll3][ll4][l]")

# ---- WAIT_PERMISSION section ----------------------------------------------
file.write("\n")
file.write("\n")
file.write("\n")
file.write("wait permission")
file.write("\n")
file.write("\n")
file.write("\n")
file.write("when(!hl2 && !hl3 && !hl4) enter[ID] -> CRITICAL[ll2][ll3][ll4][l] | // not waiting for anyone\n")

lower_priority = \
"""{2} receive_permission[{0}][ID] -> WAIT_PERMISSION{1}[ll2][ll3][ll4][l] |
"""
low_list = ["", "[{}][hl3][hl4]","[hl2][{}][hl4]","[hl2][hl3][{}]"]
low_list2 = ["", "[{}][ll3][ll4]","[ll2][{}][ll4]","[ll2][ll3][{}]"]
when_lst = ["", "hl2", "hl3", "hl4"]

# NOTE(review): this loop runs 1..MAX_CARS-1 while every analogous loop in
# this script uses range(1, MAX_CARS+1) — confirm the last car is
# intentionally excluded (the highest-ID car waits on no one).
for i in range(1,MAX_CARS):
    file.write("\n")
    cnt = i
    for j in range(1,MAX_CARS+1):
        when_str = "when (ID == {} && {}) ".format(i, when_lst[cnt])
        if i < j:
            file.write(lower_priority.format(j, low_list[cnt].format("False"), when_str))
            cnt+=1

lower_priority = \
'''{2} receive_request[{0}][ID][Lanes] -> WAIT_PERMISSION[hl2][hl3][hl4]{1} |
'''
for i in range(1,MAX_CARS+1):
    file.write("\n")
    cnt = 1
    when_str = "when (ID == {}) ".format(i)
    for j in range(1,MAX_CARS+1):
        if i != j:
            file.write(lower_priority.format(j, low_list2[cnt].format("True"), when_str))
            cnt+=1

# ---- CRITICAL section ------------------------------------------------------
file.write("\n")
file.write("\n")
file.write("\n")
file.write("critical")
file.write("\n")
file.write("\n")
file.write("\n")

# While in the critical section, requests from other cars are only recorded.
lower_priority = \
"""{2} receive_request[{0}][ID][Lanes] -> CRITICAL{1}[l] |
"""
low_list2 = ["", "[{}][ll3][ll4]","[ll2][{}][ll4]","[ll2][ll3][{}]"]
for i in range(1,MAX_CARS+1):
    file.write("\n")
    cnt = 1
    when_str = "when (ID == {}) ".format(i)
    for j in range(1,MAX_CARS+1):
        if i != j:
            file.write(lower_priority.format(j, low_list2[cnt].format("True"), when_str))
            cnt+=1
file.write("""\nexit[ID] -> CLEANUP[ll2][ll3][ll4][l] |
go_slow[ID] -> CRITICAL[ll2][ll3][ll4][l]""")

# ---- CLEANUP section -------------------------------------------------------
file.write("\n")
file.write("\n")
file.write("\n")
file.write("cleanup")
file.write("\n")
file.write("\n")
file.write("\n")

# After exiting, send the deferred permissions to each recorded requester.
lower_priority = \
"""{2} send_permission[ID][{0}] -> CLEANUP{1}[l] |
"""
low_list2 = ["", "[{}][ll3][ll4]","[ll2][{}][ll4]","[ll2][ll3][{}]"]
# Trailing "" entry: cnt can reach 4 when computing when_str after the last
# increment, so index 4 must exist (it yields an empty guard term).
when_lst = ["", "ll2", "ll3", "ll4",""]
for i in range(1,MAX_CARS+1):
    file.write("\n")
    cnt = 1
    for j in range(1,MAX_CARS+1):
        when_str = "when (ID == {} && {}) ".format(i, when_lst[cnt])
        if i != j:
            file.write(lower_priority.format(j, low_list2[cnt].format("False"), when_str))
            cnt+=1
file.write("""\nwhen(!ll2 && !ll3 && !ll4) restarting[ID] -> STARTUP |""")

# ---- SENDALL section -------------------------------------------------------
file.write("\n")
file.write("\n")
file.write("\n")
file.write("sendall")
file.write("\n")
file.write("\n")
file.write("\n")

# Broadcast a request to every other car, decrementing a send counter.
lower_priority = \
"""{1} send_request[ID][{0}][l] -> SENDALL[a][b][c][cnt-1] |
"""
for i in range(1,MAX_CARS+1):
    file.write("\n")
    cnt = i
    for j in range(1,MAX_CARS+1):
        when_str = "when (ID == {} && cnt > 0) ".format(i)
        if i != j:
            file.write(lower_priority.format(j, when_str))
            cnt+=1
file.write("""\nwhen(cnt == 0) dummy[ID] -> BCAST""")
file.close()
|
mastino/Internet_of_Thrones
|
final/ltsa/gen.py
|
Python
|
gpl-3.0
| 4,090
|
[
"Brian"
] |
987fc675ec45dea4459448e12a34c8f3658875be9d60ba494dc8a78230b646fd
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""astroid manager: avoid multiple astroid build of a same module when
possible by providing a class responsible to get astroid representation
from various source and using a cache of built modules)
"""
import imp
import os
import sys
import zipimport
import six
from astroid import exceptions
from astroid import modutils
from astroid import transforms
from astroid import util
def safe_repr(obj):
    """Return ``repr(obj)``, falling back to ``'???'`` if repr itself raises."""
    try:
        result = repr(obj)
    except Exception:  # pylint: disable=broad-except
        result = '???'
    return result
class AstroidManager(object):
    """the astroid manager, responsible to build astroid from files
    or modules.

    Use the Borg pattern: every instance shares the class-level ``brain``
    dict as its ``__dict__``, so caches and settings are effectively global.
    """

    name = 'astroid loader'
    # Shared state for the Borg pattern (see __init__).
    brain = {}

    def __init__(self):
        # All instances alias the same state dict; initialise it only once.
        self.__dict__ = AstroidManager.brain
        if not self.__dict__:
            # NOTE: cache entries are added by the [re]builder
            self.astroid_cache = {}
            self._mod_file_cache = {}
            self._failed_import_hooks = []
            self.always_load_extensions = False
            self.optimize_ast = False
            self.extension_package_whitelist = set()
            self._transform = transforms.TransformVisitor()

            # Export these APIs for convenience
            self.register_transform = self._transform.register_transform
            self.unregister_transform = self._transform.unregister_transform

    def visit_transforms(self, node):
        """Visit the transforms and apply them to the given *node*."""
        return self._transform.visit(node)

    def ast_from_file(self, filepath, modname=None, fallback=True, source=False):
        """Given a file path, return the astroid object for that module.

        *source* doubles as an in/out flag: it is forced to True when a
        source file can be located for *filepath*.
        """
        try:
            filepath = modutils.get_source_file(filepath, include_no_ext=True)
            source = True
        except modutils.NoSourceFile:
            pass
        if modname is None:
            try:
                modname = '.'.join(modutils.modpath_from_file(filepath))
            except ImportError:
                # Not importable from sys.path; fall back to the raw path.
                modname = filepath
        # Cache hit only counts if it was built from the same file.
        if modname in self.astroid_cache and self.astroid_cache[modname].file == filepath:
            return self.astroid_cache[modname]
        if source:
            # Local import avoids a circular dependency with astroid.builder.
            from astroid.builder import AstroidBuilder
            return AstroidBuilder(self).file_build(filepath, modname)
        elif fallback and modname:
            return self.ast_from_module_name(modname)
        raise exceptions.AstroidBuildingError(
            'Unable to build an AST for {path}.', path=filepath)

    def _build_stub_module(self, modname):
        """Return an empty astroid module as a stand-in for *modname*."""
        from astroid.builder import AstroidBuilder
        return AstroidBuilder(self).string_build('', modname)

    def _can_load_extension(self, modname):
        """Return True if importing the C extension *modname* is allowed."""
        if self.always_load_extensions:
            return True
        if modutils.is_standard_module(modname):
            return True
        parts = modname.split('.')
        # Whitelisted if any ancestor package is whitelisted.
        return any(
            '.'.join(parts[:x]) in self.extension_package_whitelist
            for x in range(1, len(parts) + 1))

    def ast_from_module_name(self, modname, context_file=None):
        """given a module name, return the astroid object"""
        if modname in self.astroid_cache:
            return self.astroid_cache[modname]
        if modname == '__main__':
            return self._build_stub_module(modname)
        # Temporarily chdir next to the context file so relative imports
        # resolve; always restored in the finally clause.
        old_cwd = os.getcwd()
        if context_file:
            os.chdir(os.path.dirname(context_file))
        try:
            filepath, mp_type = self.file_from_module_name(modname, context_file)
            if mp_type == modutils.PY_ZIPMODULE:
                module = self.zip_import_data(filepath)
                if module is not None:
                    return module
            elif mp_type in (imp.C_BUILTIN, imp.C_EXTENSION):
                if mp_type == imp.C_EXTENSION and not self._can_load_extension(modname):
                    # Refuse to import untrusted C extensions; use a stub.
                    return self._build_stub_module(modname)
                try:
                    module = modutils.load_module_from_name(modname)
                except Exception as ex:  # pylint: disable=broad-except
                    util.reraise(exceptions.AstroidImportError(
                        'Loading {modname} failed with:\n{error}',
                        modname=modname, path=filepath, error=ex))
                return self.ast_from_module(module, modname)
            elif mp_type == imp.PY_COMPILED:
                raise exceptions.AstroidImportError(
                    "Unable to load compiled module {modname}.",
                    modname=modname, path=filepath)
            if filepath is None:
                raise exceptions.AstroidImportError(
                    "Can't find a file for module {modname}.",
                    modname=modname)
            return self.ast_from_file(filepath, modname, fallback=False)
        except exceptions.AstroidBuildingError as e:
            # Give registered failed-import hooks a chance to supply the
            # module before propagating the original error.
            for hook in self._failed_import_hooks:
                try:
                    return hook(modname)
                except exceptions.AstroidBuildingError:
                    pass
            raise e
        finally:
            os.chdir(old_cwd)

    def zip_import_data(self, filepath):
        """Build an astroid module from a path inside a .zip/.egg archive.

        Returns None when *filepath* is not inside a handled archive or
        the archive cannot be read.
        """
        # NOTE(review): zipimport is imported at module level, so this
        # guard looks unreachable — presumably defensive; confirm.
        if zipimport is None:
            return None
        from astroid.builder import AstroidBuilder
        builder = AstroidBuilder(self)
        for ext in ('.zip', '.egg'):
            try:
                eggpath, resource = filepath.rsplit(ext + os.path.sep, 1)
            except ValueError:
                continue
            try:
                importer = zipimport.zipimporter(eggpath + ext)
                zmodname = resource.replace(os.path.sep, '.')
                if importer.is_package(resource):
                    zmodname = zmodname + '.__init__'
                module = builder.string_build(importer.get_source(resource),
                                              zmodname, filepath)
                return module
            except Exception:  # pylint: disable=broad-except
                continue
        return None

    def file_from_module_name(self, modname, contextfile):
        """Resolve *modname* to (filepath, module type), with caching.

        Failed lookups are cached too (as AstroidImportError instances)
        and re-raised with their original traceback on later hits.
        """
        try:
            value = self._mod_file_cache[(modname, contextfile)]
            traceback = sys.exc_info()[2]
        except KeyError:
            try:
                value = modutils.file_info_from_modpath(
                    modname.split('.'), context_file=contextfile)
                traceback = sys.exc_info()[2]
            except ImportError as ex:
                # Cache the failure so repeated lookups stay cheap.
                value = exceptions.AstroidImportError(
                    'Failed to import module {modname} with error:\n{error}.',
                    modname=modname, error=ex)
                traceback = sys.exc_info()[2]
            self._mod_file_cache[(modname, contextfile)] = value
        if isinstance(value, exceptions.AstroidBuildingError):
            six.reraise(exceptions.AstroidBuildingError,
                        value, traceback)
        return value

    def ast_from_module(self, module, modname=None):
        """given an imported module, return the astroid object"""
        modname = modname or module.__name__
        if modname in self.astroid_cache:
            return self.astroid_cache[modname]
        try:
            # some builtin modules don't have __file__ attribute
            filepath = module.__file__
            if modutils.is_python_source(filepath):
                # Prefer building from source when it is available.
                return self.ast_from_file(filepath, modname)
        except AttributeError:
            pass
        from astroid.builder import AstroidBuilder
        return AstroidBuilder(self).module_build(module, modname)

    def ast_from_class(self, klass, modname=None):
        """get astroid for the given class"""
        if modname is None:
            try:
                modname = klass.__module__
            except AttributeError:
                util.reraise(exceptions.AstroidBuildingError(
                    'Unable to get module for class {class_name}.',
                    cls=klass, class_repr=safe_repr(klass), modname=modname))
        modastroid = self.ast_from_module_name(modname)
        return modastroid.getattr(klass.__name__)[0]  # XXX

    def infer_ast_from_something(self, obj, context=None):
        """infer astroid for the given class (generator of inferred nodes)"""
        # Work on the class of instances, on the object itself otherwise.
        if hasattr(obj, '__class__') and not isinstance(obj, type):
            klass = obj.__class__
        else:
            klass = obj
        try:
            modname = klass.__module__
        except AttributeError:
            util.reraise(exceptions.AstroidBuildingError(
                'Unable to get module for {class_repr}.',
                cls=klass, class_repr=safe_repr(klass)))
        except Exception as ex:  # pylint: disable=broad-except
            util.reraise(exceptions.AstroidImportError(
                'Unexpected error while retrieving module for {class_repr}:\n'
                '{error}', cls=klass, class_repr=safe_repr(klass), error=ex))
        try:
            name = klass.__name__
        except AttributeError:
            util.reraise(exceptions.AstroidBuildingError(
                'Unable to get name for {class_repr}:\n',
                cls=klass, class_repr=safe_repr(klass)))
        except Exception as ex:  # pylint: disable=broad-except
            util.reraise(exceptions.AstroidImportError(
                'Unexpected error while retrieving name for {class_repr}:\n'
                '{error}', cls=klass, class_repr=safe_repr(klass), error=ex))
        # take care, on living object __module__ is regularly wrong :(
        modastroid = self.ast_from_module_name(modname)
        if klass is obj:
            for inferred in modastroid.igetattr(name, context):
                yield inferred
        else:
            # *obj* was an instance: yield instances of the inferred class.
            for inferred in modastroid.igetattr(name, context):
                yield inferred.instantiate_class()

    def register_failed_import_hook(self, hook):
        """Registers a hook to resolve imports that cannot be found otherwise.

        `hook` must be a function that accepts a single argument `modname` which
        contains the name of the module or package that could not be imported.
        If `hook` can resolve the import, must return a node of type `astroid.Module`,
        otherwise, it must raise `AstroidBuildingError`.
        """
        self._failed_import_hooks.append(hook)

    def cache_module(self, module):
        """Cache a module if no module with the same name is known yet."""
        self.astroid_cache.setdefault(module.name, module)

    def clear_cache(self, astroid_builtin=None):
        """Empty the astroid cache and re-bootstrap the builtins module."""
        # XXX clear transforms
        self.astroid_cache.clear()
        # force bootstrap again, else we may ends up with cache inconsistency
        # between the manager and CONST_PROXY, making
        # unittest_lookup.LookupTC.test_builtin_lookup fail depending on the
        # test order
        import astroid.raw_building
        astroid.raw_building._astroid_bootstrapping(
            astroid_builtin=astroid_builtin)
|
pylint-bot/astroid-unofficial
|
astroid/manager.py
|
Python
|
gpl-2.0
| 11,763
|
[
"VisIt"
] |
dd93f7e583fb7aa16f3785ec4401ff83cf7d4fc0b6267945e8a68165406a8010
|
# -*- coding: utf-8 -*-
# squidgui.py ---
#
# Filename: squidgui.py
# Description:
# Author:
# Maintainer:
# Created: Mon Jul 9 18:23:55 2012 (+0530)
# Version:
# Last-Updated: Tue May 3 00:18:47 2016 (-0400)
# By: subha
# Update #: 1078
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
import os
from collections import defaultdict
import time
try:
from PyQt4 import QtGui
from PyQt4 import QtCore
except ImportError as e:
print( "[INFO ] Could not import PyQt4. Quitting..." )
quit()
import numpy
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
import moose
from squid import *
from squid_setup import SquidSetup
from electronics import ClampCircuit
tooltip_Nernst = """<h3>Ionic equilibrium potential</h3>
<p/>
The equilibrium potential for ion C is given by Nernst equation:
<p>
E<sub>C</sub> = (RT/zF) * ln([C]<sub>out</sub> / [C]<sub>in</sub>)
</p>
where R is the ideal gas constant (8.3145 J/mol K),<br>
T is absolute temperature,<br>
z is the valence of the ion,<br>
F is Faraday's constant 96480 C/mol,<br>
[C]<sub>out</sub> is concentration of C outside the membrane,<br>
[C]<sub>in</sub> is concentration of C inside the membrane."""
tooltip_Erest = """<h3>Resting membrane potential</h3>
<p/>
The resting membrane potential is determined by the ionic
concentrations inside and outside the cell membrane and is given by
the Goldman-Hodgkin-Katz equation:
<p>
V = (RT/F) * ln((P<sub>K</sub>[K<sup>+</sup>]<sub>out</sub> + P<sub>Na</sub>[Na<sup>+</sup>]<sub>out</sub> + P<sub>Cl</sub>[Cl<sup>-</sup>]<sub>in</sub>) / (P<sub>K</sub>[K<sup>+</sup>]in + P<sub>Na</sub>[Na<sup>+</sup>]<sub>in</sub> + P<sub>Cl</sub>[Cl<sup>-</sup>]<sub>out</sub>))
</p>
where P<sub>C</sub> is the permeability of the membrane to ion C.
"""
tooltip_NaChan = """<h3>Na+ channel conductance</h3>
<p/>
The Na<sup>+</sup> channel conductance in squid giant axon is given by:
<p> G<sub>Na</sub> = Ḡ<sub>Na</sub> * m<sup>3</sup> * h </p>
and the current through this channel is:
<p>
I<sub>Na</sub> = G<sub>Na</sub> * (V - E<sub>Na</sub>) = Ḡ<sub>Na</sub> * m<sup>3</sup> * h * (V - E<sub>Na</sub>)
</p>
where Ḡ<sub>Na</sub> is the peak conductance of Na<sup>+</sup> channel, m is
the fraction of activation gates open and h is the fraction of
deactivation gates open. The transition from open to closed state has
first order kinetics:
<p> dm/dt = α<sub>m</sub> * ( 1 - m) - β<sub>m</sub> * m </p>
and similarly for h.
The steady state values are:
<p> m<sub>∞</sub> = α<sub>m</sub>/(α<sub>m</sub> + β<sub>m</sub>) </p>
and time constant for steady state is:
<p>τ<sub>m</sub> = 1/ (α<sub>m</sub> + β<sub>m</sub>) </p>
and similarly for h.
"""
tooltip_KChan = """<h3>K+ channel conductance</h3>
<p/>The K+ channel conductance in squid giant axon is given by:
<p> G<sub>K</sub> = Ḡ<sub>K</sub> * n<sup>4</sup></p>
and the current through this channel is:
<p>
I<sub>K</sub> = G<sub>K</sub> * (V - E<sub>K</sub>) = Ḡ<sub>K</sub> * n<sup>4</sup> * (V - E<sub>K</sub>)
</p>
where Ḡ<sub>K</sub> is the peak conductance of K<sup>+</sup> channel,
n is the fraction of activation gates open. The transition from open
to closed state has first order kinetics: <p> dn/dt = α<sub>n</sub> *
( 1 - n) - β<sub>n</sub> * n </p>.
The steady state values are:
<p>
n<sub>∞</sub> = α<sub>n</sub>/(α<sub>n</sub> + β<sub>n</sub>)
</p>
and time constant for steady state is:
<p>
τ<sub>n</sub> = 1/ (α<sub>n</sub> + β<sub>n</sub>)
</p>
and similarly for h.
"""
tooltip_Im = """<h3>Membrane current</h3>
<p/>
The current through the membrane is given by:
<p>
I<sub>m</sub> = C<sub>m</sub> dV/dt + I<sub>K</sub> + I<sub>Na</sub> + I<sub>L</sub>
</p><p>
= C<sub>m</sub> dV/dt + G<sub>K</sub>(V, t) * (V - E<sub>K</sub>) + G<sub>Na</sub> * (V - E<sub>Na</sub>) + G<sub>L</sub> * (V - E<sub>L</sub>)
</p>
where G<sub>L</sub> is the leak current and E<sub>L</sub> is the leak reversal potential.
"""
# Shared fixed size for parameter line-edits so the control docks line up.
default_line_edit_size = QtCore.QSize(80, 25)

def set_default_line_edit_size(widget):
    """Pin *widget* to the shared fixed size (min == max == default_line_edit_size)."""
    widget.setMinimumSize(default_line_edit_size)
    widget.setMaximumSize(default_line_edit_size)
class SquidGui(QtGui.QMainWindow):
    """Main window of the Hodgkin-Huxley squid-axon demo.

    Builds dock widgets for simulation control, channel properties and
    clamp electronics around a central matplotlib figure, runs the
    MOOSE-based SquidSetup model, and plots Vm, conductances and
    currents.  Uses PyQt4 old-style signal/slot connections throughout.
    """
    # Merged default parameters: model defaults from SquidAxon and
    # ClampCircuit plus GUI-level protocol settings.  Units follow the
    # widget labels: times in ms, voltages in mV, currents in uA.
    defaults = {}
    defaults.update(SquidAxon.defaults)
    defaults.update(ClampCircuit.defaults)
    defaults.update({'runtime': 50.0,
                     'simdt': 0.01,
                     'plotdt': 0.1,
                     'vclamp.holdingV': 0.0,
                     'vclamp.holdingT': 10.0,
                     'vclamp.prepulseV': 0.0,
                     'vclamp.prepulseT': 0.0,
                     'vclamp.clampV': 50.0,
                     'vclamp.clampT': 20.0,
                     'iclamp.baseI': 0.0,
                     'iclamp.firstI': 0.1,
                     'iclamp.firstT': 40.0,
                     'iclamp.firstD': 5.0,
                     'iclamp.secondI': 0.0,
                     'iclamp.secondT': 0.0,
                     'iclamp.secondD': 0.0
                     })
    def __init__(self, *args):
        """Create the model, all dock widgets, plots, actions and toolbars."""
        QtGui.QMainWindow.__init__(self, *args)
        self.squid_setup = SquidSetup()
        self._plotdt = SquidGui.defaults['plotdt']
        # Keeps every matplotlib line object ever created, keyed by
        # series name, so overlaid runs can coexist (see _overlayPlots).
        self._plot_dict = defaultdict(list)
        self.setWindowTitle('Squid Axon simulation')
        self.setDockNestingEnabled(True)
        self._createRunControl()
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self._runControlDock)
        self._runControlDock.setFeatures(QtGui.QDockWidget.AllDockWidgetFeatures)
        self._createChannelControl()
        self._channelCtrlBox.setWindowTitle('Channel properties')
        self._channelControlDock.setFeatures(QtGui.QDockWidget.AllDockWidgetFeatures)
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self._channelControlDock)
        self._createElectronicsControl()
        self._electronicsDock.setFeatures(QtGui.QDockWidget.AllDockWidgetFeatures)
        self._electronicsDock.setWindowTitle('Electronics')
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self._electronicsDock)
        self._createPlotWidget()
        self.setCentralWidget(self._plotWidget)
        # State plot lives in its own top-level window, hidden until the
        # 'State plot' toolbar action is toggled on.
        self._createStatePlotWidget()
        self._createHelpMessage()
        self._helpWindow.setVisible(False)
        self._statePlotWidget.setWindowFlags(QtCore.Qt.Window)
        self._statePlotWidget.setWindowTitle('State plot')
        self._initActions()
        self._createRunToolBar()
        self._createPlotToolBar()
    def getFloatInput(self, widget, name):
        """Parse the text of a QLineEdit as float.

        Pops up a critical message box and re-raises ValueError when the
        text is not a valid number; *name* is used in the error message.
        """
        try:
            return float(str(widget.text()))
        except ValueError:
            QtGui.QMessageBox.critical(self, 'Invalid input', 'Please enter a valid number for {}'.format(name))
            raise
    def _createPlotWidget(self):
        """Build the central 2x2 matplotlib figure (Vm, G, Iinj, Ichan)."""
        self._plotWidget = QtGui.QWidget()
        self._plotFigure = Figure()
        self._plotCanvas = FigureCanvas(self._plotFigure)
        self._plotCanvas.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self._plotCanvas.updateGeometry()
        self._plotCanvas.setParent(self._plotWidget)
        self._plotCanvas.mpl_connect('scroll_event', self._onScroll)
        self._plotFigure.set_canvas(self._plotCanvas)
        # Vm and command voltage go in the same subplot
        self._vm_axes = self._plotFigure.add_subplot(2,2,1, title='Membrane potential')
        self._vm_axes.set_ylim(-20.0, 120.0)
        # Channel conductances go to the same subplot
        self._g_axes = self._plotFigure.add_subplot(2,2,2, title='Channel conductance')
        self._g_axes.set_ylim(0.0, 0.5)
        # Injection current for Vclamp/Iclamp go to the same subplot
        self._im_axes = self._plotFigure.add_subplot(2,2,3, title='Injection current')
        self._im_axes.set_ylim(-0.5, 0.5)
        # Channel currents go to the same subplot
        self._i_axes = self._plotFigure.add_subplot(2,2,4, title='Channel current')
        self._i_axes.set_ylim(-10, 10)
        for axis in self._plotFigure.axes:
            axis.set_autoscale_on(False)
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self._plotCanvas)
        self._plotNavigator = NavigationToolbar(self._plotCanvas, self._plotWidget)
        layout.addWidget(self._plotNavigator)
        self._plotWidget.setLayout(layout)
    def _createStatePlotWidget(self):
        """Build the state-plot window: V/m/n/h phase plot plus m,h,n vs time."""
        self._statePlotWidget = QtGui.QWidget()
        self._statePlotFigure = Figure()
        self._statePlotCanvas = FigureCanvas(self._statePlotFigure)
        self._statePlotCanvas.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self._statePlotCanvas.updateGeometry()
        self._statePlotCanvas.setParent(self._statePlotWidget)
        self._statePlotFigure.set_canvas(self._statePlotCanvas)
        self._statePlotFigure.subplots_adjust(hspace=0.5)
        self._statePlotAxes = self._statePlotFigure.add_subplot(2,1,1, title='State plot')
        self._state_plot, = self._statePlotAxes.plot([], [], label='state')
        self._activationParamAxes = self._statePlotFigure.add_subplot(2,1,2, title='H-H activation parameters vs time')
        self._activationParamAxes.set_xlabel('Time (ms)')
        #for axis in self._plotFigure.axes:
        #    axis.autoscale(False)
        # Combo boxes select which state variable goes on each axis.
        self._stateplot_xvar_label = QtGui.QLabel('Variable on X-axis')
        self._stateplot_xvar_combo = QtGui.QComboBox()
        self._stateplot_xvar_combo.addItems(['V', 'm', 'n', 'h'])
        self._stateplot_xvar_combo.setCurrentIndex(0)
        self._stateplot_xvar_combo.setEditable(False)
        self.connect(self._stateplot_xvar_combo,
                     QtCore.SIGNAL('currentIndexChanged(const QString&)'),
                     self._statePlotXSlot)
        self._stateplot_yvar_label = QtGui.QLabel('Variable on Y-axis')
        self._stateplot_yvar_combo = QtGui.QComboBox()
        self._stateplot_yvar_combo.addItems(['V', 'm', 'n', 'h'])
        self._stateplot_yvar_combo.setCurrentIndex(2)
        self._stateplot_yvar_combo.setEditable(False)
        self.connect(self._stateplot_yvar_combo,
                     QtCore.SIGNAL('currentIndexChanged(const QString&)'),
                     self._statePlotYSlot)
        self._statePlotNavigator = NavigationToolbar(self._statePlotCanvas, self._statePlotWidget)
        frame = QtGui.QFrame()
        frame.setFrameStyle(QtGui.QFrame.StyledPanel + QtGui.QFrame.Raised)
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self._stateplot_xvar_label)
        layout.addWidget(self._stateplot_xvar_combo)
        layout.addWidget(self._stateplot_yvar_label)
        layout.addWidget(self._stateplot_yvar_combo)
        frame.setLayout(layout)
        self._closeStatePlotAction = QtGui.QAction('Close', self)
        self.connect(self._closeStatePlotAction, QtCore.SIGNAL('triggered()'), self._statePlotWidget.close)
        self._closeStatePlotButton = QtGui.QToolButton()
        self._closeStatePlotButton.setDefaultAction(self._closeStatePlotAction)
        layout = QtGui.QVBoxLayout()
        layout.addWidget(frame)
        layout.addWidget(self._statePlotCanvas)
        layout.addWidget(self._statePlotNavigator)
        layout.addWidget(self._closeStatePlotButton)
        self._statePlotWidget.setLayout(layout)
        # Setting the close event so that when the help window is
        # closed the ``State plot`` button becomes unchecked
        self._statePlotWidget.closeEvent = lambda event: self._showStatePlotAction.setChecked(False)
    def _createRunControl(self):
        """Build the 'Simulation' dock: runtime and time-step entries."""
        self._runControlBox = QtGui.QGroupBox(self)
        self._runControlBox.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        self._runTimeLabel = QtGui.QLabel("Run time (ms)", self._runControlBox)
        self._simTimeStepLabel = QtGui.QLabel("Simulation time step (ms)", self._runControlBox)
        self._runTimeEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['runtime']), self._runControlBox)
        set_default_line_edit_size(self._runTimeEdit)
        self._simTimeStepEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['simdt']), self._runControlBox)
        set_default_line_edit_size(self._simTimeStepEdit)
        layout = QtGui.QGridLayout()
        layout.addWidget(self._runTimeLabel, 0, 0)
        layout.addWidget(self._runTimeEdit, 0, 1)
        layout.addWidget(self._simTimeStepLabel, 1, 0)
        layout.addWidget(self._simTimeStepEdit, 1, 1)
        layout.setColumnStretch(2, 1.0)
        layout.setRowStretch(2, 1.0)
        self._runControlBox.setLayout(layout)
        self._runControlDock = QtGui.QDockWidget('Simulation', self)
        self._runControlDock.setWidget(self._runControlBox)
    def _createChannelControl(self):
        """Build the 'Channels' dock: channel blockers, ionic concentrations,
        temperature.  Returns the group box (also stored on self)."""
        self._channelControlDock = QtGui.QDockWidget('Channels', self)
        self._channelCtrlBox = QtGui.QGroupBox(self)
        self._naConductanceToggle = QtGui.QCheckBox('Block Na+ channel', self._channelCtrlBox)
        self._naConductanceToggle.setToolTip('<html>%s</html>' % (tooltip_NaChan))
        self._kConductanceToggle = QtGui.QCheckBox('Block K+ channel', self._channelCtrlBox)
        self._kConductanceToggle.setToolTip('<html>%s</html>' % (tooltip_KChan))
        self._kOutLabel = QtGui.QLabel('[K+]out (mM)', self._channelCtrlBox)
        self._kOutEdit = QtGui.QLineEdit('%g' % (self.squid_setup.squid_axon.K_out),
                                         self._channelCtrlBox)
        self._kOutLabel.setToolTip('<html>%s</html>' % (tooltip_Nernst))
        self._kOutEdit.setToolTip('<html>%s</html>' % (tooltip_Nernst))
        set_default_line_edit_size(self._kOutEdit)
        self._naOutLabel = QtGui.QLabel('[Na+]out (mM)', self._channelCtrlBox)
        self._naOutEdit = QtGui.QLineEdit('%g' % (self.squid_setup.squid_axon.Na_out),
                                          self._channelCtrlBox)
        self._naOutLabel.setToolTip('<html>%s</html>' % (tooltip_Nernst))
        self._naOutEdit.setToolTip('<html>%s</html>' % (tooltip_Nernst))
        set_default_line_edit_size(self._naOutEdit)
        self._kInLabel = QtGui.QLabel('[K+]in (mM)', self._channelCtrlBox)
        self._kInEdit = QtGui.QLineEdit('%g' % (self.squid_setup.squid_axon.K_in),
                                        self._channelCtrlBox)
        # NOTE(review): unlike its siblings this tooltip is not wrapped in
        # <html> tags -- looks like an oversight, but rendering is unaffected
        # enough that it was left as-is.
        self._kInEdit.setToolTip(tooltip_Nernst)
        self._naInLabel = QtGui.QLabel('[Na+]in (mM)', self._channelCtrlBox)
        self._naInEdit = QtGui.QLineEdit('%g' % (self.squid_setup.squid_axon.Na_in),
                                         self._channelCtrlBox)
        self._naInEdit.setToolTip('<html>%s</html>' % (tooltip_Nernst))
        self._temperatureLabel = QtGui.QLabel('Temperature (C)', self._channelCtrlBox)
        # Model stores temperature in Kelvin; the GUI displays Celsius.
        self._temperatureEdit = QtGui.QLineEdit('%g' % (self.defaults['temperature'] - CELSIUS_TO_KELVIN),
                                                self._channelCtrlBox)
        self._temperatureEdit.setToolTip('<html>%s</html>' % (tooltip_Nernst))
        set_default_line_edit_size(self._temperatureEdit)
        for child in self._channelCtrlBox.children():
            if isinstance(child, QtGui.QLineEdit):
                set_default_line_edit_size(child)
        layout = QtGui.QGridLayout(self._channelCtrlBox)
        layout.addWidget(self._naConductanceToggle, 0, 0)
        layout.addWidget(self._kConductanceToggle, 1, 0)
        layout.addWidget(self._naOutLabel, 2, 0)
        layout.addWidget(self._naOutEdit, 2, 1)
        layout.addWidget(self._naInLabel, 3, 0)
        layout.addWidget(self._naInEdit, 3, 1)
        layout.addWidget(self._kOutLabel, 4, 0)
        layout.addWidget(self._kOutEdit, 4, 1)
        layout.addWidget(self._kInLabel, 5, 0)
        layout.addWidget(self._kInEdit, 5, 1)
        layout.addWidget(self._temperatureLabel, 6, 0)
        layout.addWidget(self._temperatureEdit, 6, 1)
        layout.setRowStretch(7, 1.0)
        self._channelCtrlBox.setLayout(layout)
        self._channelControlDock.setWidget(self._channelCtrlBox)
        return self._channelCtrlBox
    def __get_stateplot_data(self, name):
        """Return the recorded series for state variable *name*
        ('V', 'm', 'h' or 'n') as a numpy array.

        Raises ValueError for any other name.
        """
        data = []
        if name == 'V':
            data = self.squid_setup.vm_table.vector
        elif name == 'm':
            data = self.squid_setup.m_table.vector
        elif name == 'h':
            data = self.squid_setup.h_table.vector
        elif name == 'n':
            data = self.squid_setup.n_table.vector
        else:
            raise ValueError('Unrecognized selection: %s' % (name))
        return numpy.asarray(data)
    def _statePlotYSlot(self, selectedItem):
        """Slot: update the state plot's Y data/label/limits when the
        Y-axis combo selection changes."""
        ydata = self.__get_stateplot_data(str(selectedItem))
        self._state_plot.set_ydata(ydata)
        self._statePlotAxes.set_ylabel(selectedItem)
        # V spans mV range; gating variables m/n/h live in [0, 1].
        if str(selectedItem) == 'V':
            self._statePlotAxes.set_ylim(-20, 120)
        else:
            self._statePlotAxes.set_ylim(0, 1)
        self._statePlotCanvas.draw()
    def _statePlotXSlot(self, selectedItem):
        """Slot: update the state plot's X data/label/limits when the
        X-axis combo selection changes."""
        xdata = self.__get_stateplot_data(str(selectedItem))
        self._state_plot.set_xdata(xdata)
        self._statePlotAxes.set_xlabel(selectedItem)
        if str(selectedItem) == 'V':
            self._statePlotAxes.set_xlim(-20, 120)
        else:
            self._statePlotAxes.set_xlim(0, 1)
        self._statePlotCanvas.draw()
    def _createElectronicsControl(self):
        """Creates a tabbed widget of voltage clamp and current clamp controls"""
        self._electronicsTab = QtGui.QTabWidget(self)
        self._electronicsTab.addTab(self._getIClampCtrlBox(), 'Current clamp')
        self._electronicsTab.addTab(self._getVClampCtrlBox(), 'Voltage clamp')
        self._electronicsDock = QtGui.QDockWidget(self)
        self._electronicsDock.setWidget(self._electronicsTab)
    def _getVClampCtrlBox(self):
        """Build and return the voltage-clamp control panel (holding,
        pre-pulse and clamp voltage/time entries)."""
        vClampPanel = QtGui.QGroupBox(self)
        self._vClampCtrlBox = vClampPanel
        self._holdingVLabel = QtGui.QLabel("Holding Voltage (mV)", vClampPanel)
        self._holdingVEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['vclamp.holdingV']), vClampPanel)
        self._holdingTimeLabel = QtGui.QLabel("Holding Time (ms)", vClampPanel)
        self._holdingTimeEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['vclamp.holdingT']), vClampPanel)
        self._prePulseVLabel = QtGui.QLabel("Pre-pulse Voltage (mV)", vClampPanel)
        self._prePulseVEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['vclamp.prepulseV']), vClampPanel)
        self._prePulseTimeLabel = QtGui.QLabel("Pre-pulse Time (ms)", vClampPanel)
        self._prePulseTimeEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['vclamp.prepulseT']), vClampPanel)
        self._clampVLabel = QtGui.QLabel("Clamp Voltage (mV)", vClampPanel)
        self._clampVEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['vclamp.clampV']), vClampPanel)
        self._clampTimeLabel = QtGui.QLabel("Clamp Time (ms)", vClampPanel)
        self._clampTimeEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['vclamp.clampT']), vClampPanel)
        for child in vClampPanel.children():
            if isinstance(child, QtGui.QLineEdit):
                set_default_line_edit_size(child)
        layout = QtGui.QGridLayout(vClampPanel)
        layout.addWidget(self._holdingVLabel, 0, 0)
        layout.addWidget(self._holdingVEdit, 0, 1)
        layout.addWidget(self._holdingTimeLabel, 1, 0)
        layout.addWidget(self._holdingTimeEdit, 1, 1)
        layout.addWidget(self._prePulseVLabel, 2, 0)
        layout.addWidget(self._prePulseVEdit, 2, 1)
        layout.addWidget(self._prePulseTimeLabel,3,0)
        layout.addWidget(self._prePulseTimeEdit, 3, 1)
        layout.addWidget(self._clampVLabel, 4, 0)
        layout.addWidget(self._clampVEdit, 4, 1)
        layout.addWidget(self._clampTimeLabel, 5, 0)
        layout.addWidget(self._clampTimeEdit, 5, 1)
        layout.setRowStretch(6, 1.0)
        vClampPanel.setLayout(layout)
        return self._vClampCtrlBox
    def _getIClampCtrlBox(self):
        """Build and return the current-clamp control panel (base current,
        two pulses, single-pulse/pulse-train mode)."""
        iClampPanel = QtGui.QGroupBox(self)
        self._iClampCtrlBox = iClampPanel
        self._baseCurrentLabel = QtGui.QLabel("Base Current Level (uA)",iClampPanel)
        self._baseCurrentEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['iclamp.baseI']),iClampPanel)
        self._firstPulseLabel = QtGui.QLabel("First Pulse Current (uA)", iClampPanel)
        self._firstPulseEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['iclamp.firstI']), iClampPanel)
        self._firstDelayLabel = QtGui.QLabel("First Onset Delay (ms)", iClampPanel)
        self._firstDelayEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['iclamp.firstD']),iClampPanel)
        self._firstPulseWidthLabel = QtGui.QLabel("First Pulse Width (ms)", iClampPanel)
        self._firstPulseWidthEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['iclamp.firstT']), iClampPanel)
        self._secondPulseLabel = QtGui.QLabel("Second Pulse Current (uA)", iClampPanel)
        self._secondPulseEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['iclamp.secondI']), iClampPanel)
        self._secondDelayLabel = QtGui.QLabel("Second Onset Delay (ms)", iClampPanel)
        self._secondDelayEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['iclamp.secondD']),iClampPanel)
        self._secondPulseWidthLabel = QtGui.QLabel("Second Pulse Width (ms)", iClampPanel)
        self._secondPulseWidthEdit = QtGui.QLineEdit('%g' % (SquidGui.defaults['iclamp.secondT']), iClampPanel)
        self._pulseMode = QtGui.QComboBox(iClampPanel)
        self._pulseMode.addItem("Single Pulse")
        self._pulseMode.addItem("Pulse Train")
        for child in iClampPanel.children():
            if isinstance(child, QtGui.QLineEdit):
                set_default_line_edit_size(child)
        layout = QtGui.QGridLayout(iClampPanel)
        layout.addWidget(self._baseCurrentLabel, 0, 0)
        layout.addWidget(self._baseCurrentEdit, 0, 1)
        layout.addWidget(self._firstPulseLabel, 1, 0)
        layout.addWidget(self._firstPulseEdit, 1, 1)
        layout.addWidget(self._firstDelayLabel, 2, 0)
        layout.addWidget(self._firstDelayEdit, 2, 1)
        layout.addWidget(self._firstPulseWidthLabel, 3, 0)
        layout.addWidget(self._firstPulseWidthEdit, 3, 1)
        layout.addWidget(self._secondPulseLabel, 4, 0)
        layout.addWidget(self._secondPulseEdit, 4, 1)
        layout.addWidget(self._secondDelayLabel, 5, 0)
        layout.addWidget(self._secondDelayEdit, 5, 1)
        layout.addWidget(self._secondPulseWidthLabel, 6, 0)
        layout.addWidget(self._secondPulseWidthEdit, 6, 1)
        layout.addWidget(self._pulseMode, 7, 0, 1, 2)
        layout.setRowStretch(8, 1.0)
        # layout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
        iClampPanel.setLayout(layout)
        return self._iClampCtrlBox
    def _overlayPlots(self, overlay):
        """Prepare line objects for a new run.

        When *overlay* is false, clear every axis (keeping titles).  When
        true, keep existing lines and suffix the new labels with the run
        index so successive runs can be compared on the same axes.
        """
        if not overlay:
            for axis in (self._plotFigure.axes + self._statePlotFigure.axes):
                title = axis.get_title()
                axis.clear()
                axis.set_title(title)
            suffix = ''
        else:
            suffix = '_%d' % (len(self._plot_dict['vm']))
        self._vm_axes.set_xlim(0.0, self._runtime)
        self._g_axes.set_xlim(0.0, self._runtime)
        self._im_axes.set_xlim(0.0, self._runtime)
        self._i_axes.set_xlim(0.0, self._runtime)
        self._vm_plot, = self._vm_axes.plot([], [], label='Vm%s'%(suffix))
        self._plot_dict['vm'].append(self._vm_plot)
        self._command_plot, = self._vm_axes.plot([], [], label='command%s'%(suffix))
        self._plot_dict['command'].append(self._command_plot)
        # Channel conductances go to the same subplot
        self._gna_plot, = self._g_axes.plot([], [], label='Na%s'%(suffix))
        self._plot_dict['gna'].append(self._gna_plot)
        self._gk_plot, = self._g_axes.plot([], [], label='K%s'%(suffix))
        self._plot_dict['gk'].append(self._gk_plot)
        # Injection current for Vclamp/Iclamp go to the same subplot
        self._iclamp_plot, = self._im_axes.plot([], [], label='Iclamp%s'%(suffix))
        self._vclamp_plot, = self._im_axes.plot([], [], label='Vclamp%s'%(suffix))
        self._plot_dict['iclamp'].append(self._iclamp_plot)
        self._plot_dict['vclamp'].append(self._vclamp_plot)
        # Channel currents go to the same subplot
        self._ina_plot, = self._i_axes.plot([], [], label='Na%s'%(suffix))
        self._plot_dict['ina'].append(self._ina_plot)
        self._ik_plot, = self._i_axes.plot([], [], label='K%s'%(suffix))
        self._plot_dict['ik'].append(self._ik_plot)
        # self._i_axes.legend()
        # State plots
        self._state_plot, = self._statePlotAxes.plot([], [], label='state%s'%(suffix))
        self._plot_dict['state'].append(self._state_plot)
        self._m_plot, = self._activationParamAxes.plot([],[], label='m%s'%(suffix))
        self._h_plot, = self._activationParamAxes.plot([], [], label='h%s'%(suffix))
        self._n_plot, = self._activationParamAxes.plot([], [], label='n%s'%(suffix))
        self._plot_dict['m'].append(self._m_plot)
        self._plot_dict['h'].append(self._h_plot)
        self._plot_dict['n'].append(self._n_plot)
        if self._showLegendAction.isChecked():
            for axis in (self._plotFigure.axes + self._statePlotFigure.axes):
                axis.legend()
    def _updateAllPlots(self):
        """Refresh both the main plots and the state plot after a run."""
        self._updatePlots()
        self._updateStatePlot()
    def _updatePlots(self):
        """Copy the recorded MOOSE tables into the main figure's line
        objects and rescale/redraw.  No-op if nothing was recorded."""
        if len(self.squid_setup.vm_table.vector) <= 0:
            return
        vm = numpy.asarray(self.squid_setup.vm_table.vector)
        cmd = numpy.asarray(self.squid_setup.cmd_table.vector)
        ik = numpy.asarray(self.squid_setup.ik_table.vector)
        ina = numpy.asarray(self.squid_setup.ina_table.vector)
        iclamp = numpy.asarray(self.squid_setup.iclamp_table.vector)
        vclamp = numpy.asarray(self.squid_setup.vclamp_table.vector)
        gk = numpy.asarray(self.squid_setup.gk_table.vector)
        gna = numpy.asarray(self.squid_setup.gna_table.vector)
        # Each series is sampled every _plotdt ms; rebuild the time axis
        # per series since the tables may differ in length.
        time_series = numpy.linspace(0, self._plotdt * len(vm), len(vm))
        self._vm_plot.set_data(time_series, vm)
        time_series = numpy.linspace(0, self._plotdt * len(cmd), len(cmd))
        self._command_plot.set_data(time_series, cmd)
        time_series = numpy.linspace(0, self._plotdt * len(ik), len(ik))
        self._ik_plot.set_data(time_series, ik)
        time_series = numpy.linspace(0, self._plotdt * len(ina), len(ina))
        self._ina_plot.set_data(time_series, ina)
        time_series = numpy.linspace(0, self._plotdt * len(iclamp), len(iclamp))
        self._iclamp_plot.set_data(time_series, iclamp)
        time_series = numpy.linspace(0, self._plotdt * len(vclamp), len(vclamp))
        self._vclamp_plot.set_data(time_series, vclamp)
        time_series = numpy.linspace(0, self._plotdt * len(gk), len(gk))
        self._gk_plot.set_data(time_series, gk)
        time_series = numpy.linspace(0, self._plotdt * len(gna), len(gna))
        self._gna_plot.set_data(time_series, gna)
        # self._vm_axes.margins(y=0.1)
        # self._g_axes.margin(y=0.1)
        # self._im_axes.margins(y=0.1)
        # self._i_axes.margins(y=0.1)
        if self._autoscaleAction.isChecked():
            for axis in self._plotFigure.axes:
                axis.relim()
                axis.margins(0.1, 0.1)
                axis.autoscale_view(tight=True)
        else:
            self._vm_axes.set_ylim(-20.0, 120.0)
            self._g_axes.set_ylim(0.0, 0.5)
            self._im_axes.set_ylim(-0.5, 0.5)
            self._i_axes.set_ylim(-10, 10)
        self._vm_axes.set_xlim(0.0, time_series[-1])
        self._g_axes.set_xlim(0.0, time_series[-1])
        self._im_axes.set_xlim(0.0, time_series[-1])
        self._i_axes.set_xlim(0.0, time_series[-1])
        self._plotCanvas.draw()
    def _updateStatePlot(self):
        """Refresh the phase plot (selected X vs Y variable) and the
        m/h/n-vs-time traces.  No-op if nothing was recorded."""
        if len(self.squid_setup.vm_table.vector) <= 0:
            return
        sx = str(self._stateplot_xvar_combo.currentText())
        sy = str(self._stateplot_yvar_combo.currentText())
        xdata = self.__get_stateplot_data(sx)
        ydata = self.__get_stateplot_data(sy)
        # Tables may differ by a sample; truncate to the common length.
        minlen = min(len(xdata), len(ydata))
        self._state_plot.set_data(xdata[:minlen], ydata[:minlen])
        self._statePlotAxes.set_xlabel(sx)
        self._statePlotAxes.set_ylabel(sy)
        if sx == 'V':
            self._statePlotAxes.set_xlim(-20, 120)
        else:
            self._statePlotAxes.set_xlim(0, 1)
        if sy == 'V':
            self._statePlotAxes.set_ylim(-20, 120)
        else:
            self._statePlotAxes.set_ylim(0, 1)
        self._activationParamAxes.set_xlim(0, self._runtime)
        m = self.__get_stateplot_data('m')
        n = self.__get_stateplot_data('n')
        h = self.__get_stateplot_data('h')
        time_series = numpy.linspace(0, self._plotdt*len(m), len(m))
        self._m_plot.set_data(time_series, m)
        time_series = numpy.linspace(0, self._plotdt*len(h), len(h))
        self._h_plot.set_data(time_series, h)
        time_series = numpy.linspace(0, self._plotdt*len(n), len(n))
        self._n_plot.set_data(time_series, n)
        if self._autoscaleAction.isChecked():
            for axis in self._statePlotFigure.axes:
                axis.relim()
                axis.set_autoscale_on(True)
                axis.autoscale_view(True)
        self._statePlotCanvas.draw()
    def _runSlot(self):
        """Slot for the Run action: read all GUI parameters, configure the
        clamp circuit and axon model, run the simulation, update plots."""
        if moose.isRunning():
            print('Stopping simulation in progress ...')
            moose.stop()
        self._runtime = self.getFloatInput(self._runTimeEdit, self._runTimeLabel.text())
        self._overlayPlots(self._overlayAction.isChecked())
        self._simdt = self.getFloatInput(self._simTimeStepEdit, self._simTimeStepLabel.text())
        clampMode = None
        singlePulse = True
        # The active electronics tab decides vclamp vs iclamp; map the
        # panel fields onto the generic base/first/second pulse protocol.
        if self._electronicsTab.currentWidget() == self._vClampCtrlBox:
            clampMode = 'vclamp'
            baseLevel = self.getFloatInput(self._holdingVEdit, self._holdingVLabel.text())
            firstDelay = self.getFloatInput(self._holdingTimeEdit, self._holdingTimeLabel.text())
            firstWidth = self.getFloatInput(self._prePulseTimeEdit, self._prePulseTimeLabel.text())
            firstLevel = self.getFloatInput(self._prePulseVEdit, self._prePulseVLabel.text())
            secondDelay = firstWidth
            secondWidth = self.getFloatInput(self._clampTimeEdit, self._clampTimeLabel.text())
            secondLevel = self.getFloatInput(self._clampVEdit, self._clampVLabel.text())
            if not self._autoscaleAction.isChecked():
                self._im_axes.set_ylim(-10.0, 10.0)
        else:
            clampMode = 'iclamp'
            baseLevel = self.getFloatInput(self._baseCurrentEdit, self._baseCurrentLabel.text())
            firstDelay = self.getFloatInput(self._firstDelayEdit, self._firstDelayLabel.text())
            firstWidth = self.getFloatInput(self._firstPulseWidthEdit, self._firstPulseWidthLabel.text())
            firstLevel = self.getFloatInput(self._firstPulseEdit, self._firstPulseLabel.text())
            secondDelay = self.getFloatInput(self._secondDelayEdit, self._secondDelayLabel.text())
            secondLevel = self.getFloatInput(self._secondPulseEdit, self._secondPulseLabel.text())
            secondWidth = self.getFloatInput(self._secondPulseWidthEdit, self._secondPulseWidthLabel.text())
            singlePulse = (self._pulseMode.currentIndex() == 0)
            if not self._autoscaleAction.isChecked():
                self._im_axes.set_ylim(-0.4, 0.4)
        self.squid_setup.clamp_ckt.configure_pulses(baseLevel=baseLevel,
                                                    firstDelay=firstDelay,
                                                    firstWidth=firstWidth,
                                                    firstLevel=firstLevel,
                                                    secondDelay=secondDelay,
                                                    secondWidth=secondWidth,
                                                    secondLevel=secondLevel,
                                                    singlePulse=singlePulse)
        # Blocking a channel is modelled as zeroing its specific conductance.
        if self._kConductanceToggle.isChecked():
            self.squid_setup.squid_axon.specific_gK = 0.0
        else:
            self.squid_setup.squid_axon.specific_gK = SquidAxon.defaults['specific_gK']
        if self._naConductanceToggle.isChecked():
            self.squid_setup.squid_axon.specific_gNa = 0.0
        else:
            self.squid_setup.squid_axon.specific_gNa = SquidAxon.defaults['specific_gNa']
        self.squid_setup.squid_axon.celsius = self.getFloatInput(self._temperatureEdit, self._temperatureLabel.text())
        self.squid_setup.squid_axon.K_out = self.getFloatInput(self._kOutEdit, self._kOutLabel.text())
        self.squid_setup.squid_axon.Na_out = self.getFloatInput(self._naOutEdit, self._naOutLabel.text())
        self.squid_setup.squid_axon.K_in = self.getFloatInput(self._kInEdit, self._kInLabel.text())
        self.squid_setup.squid_axon.Na_in = self.getFloatInput(self._naInEdit, self._naInLabel.text())
        self.squid_setup.squid_axon.updateEk()
        self.squid_setup.schedule(self._simdt, self._plotdt, clampMode)
        # The following line is for use with Qthread
        self.squid_setup.run(self._runtime)
        self._updateAllPlots()
    def _toggleDocking(self, on):
        """Slot for the 'Undock all' action: float/dock the three panels."""
        self._channelControlDock.setFloating(on)
        self._electronicsDock.setFloating(on)
        self._runControlDock.setFloating(on)
    def _restoreDocks(self):
        """Slot for the 'Show all' action: make all three panels visible."""
        self._channelControlDock.setVisible(True)
        self._electronicsDock.setVisible(True)
        self._runControlDock.setVisible(True)
    def _initActions(self):
        """Create all QActions and wire them to their slots
        (old-style PyQt4 connections)."""
        self._runAction = QtGui.QAction(self.tr('Run'), self)
        self._runAction.setShortcut(self.tr('F5'))
        self._runAction.setToolTip('Run simulation (F5)')
        self.connect(self._runAction, QtCore.SIGNAL('triggered()'), self._runSlot)
        self._resetToDefaultsAction = QtGui.QAction(self.tr('Restore defaults'), self)
        self._resetToDefaultsAction.setToolTip('Reset all settings to their default values')
        self.connect(self._resetToDefaultsAction, QtCore.SIGNAL('triggered()'), self._useDefaults)
        self._showLegendAction = QtGui.QAction(self.tr('Display legend'), self)
        self._showLegendAction.setCheckable(True)
        self.connect(self._showLegendAction, QtCore.SIGNAL('toggled(bool)'), self._showLegend)
        self._showStatePlotAction = QtGui.QAction(self.tr('State plot'), self)
        self._showStatePlotAction.setCheckable(True)
        self._showStatePlotAction.setChecked(False)
        self.connect(self._showStatePlotAction, QtCore.SIGNAL('toggled(bool)'), self._statePlotWidget.setVisible)
        self._autoscaleAction  = QtGui.QAction(self.tr('Auto-scale plots'), self)
        self._autoscaleAction.setCheckable(True)
        self._autoscaleAction.setChecked(False)
        self.connect(self._autoscaleAction, QtCore.SIGNAL('toggled(bool)'), self._autoscale)
        self._overlayAction = QtGui.QAction('Overlay plots', self)
        self._overlayAction.setCheckable(True)
        self._overlayAction.setChecked(False)
        self._dockAction = QtGui.QAction('Undock all', self)
        self._dockAction.setCheckable(True)
        self._dockAction.setChecked(False)
        self.connect(self._dockAction, QtCore.SIGNAL('toggled(bool)'), self._toggleDocking)
        self._restoreDocksAction = QtGui.QAction('Show all', self)
        self.connect(self._restoreDocksAction, QtCore.SIGNAL('triggered()'), self._restoreDocks)
        self._quitAction = QtGui.QAction(self.tr('&Quit'), self)
        self._quitAction.setShortcut(self.tr('Ctrl+Q'))
        self.connect(self._quitAction, QtCore.SIGNAL('triggered()'), QtGui.qApp.closeAllWindows)
    def _createRunToolBar(self):
        """Build the 'Simulation control' toolbar."""
        self._simToolBar = self.addToolBar(self.tr('Simulation control'))
        self._simToolBar.addAction(self._quitAction)
        self._simToolBar.addAction(self._runAction)
        self._simToolBar.addAction(self._resetToDefaultsAction)
        self._simToolBar.addAction(self._dockAction)
        self._simToolBar.addAction(self._restoreDocksAction)
    def _createPlotToolBar(self):
        """Build the 'Plotting control' toolbar."""
        self._plotToolBar = self.addToolBar(self.tr('Plotting control'))
        self._plotToolBar.addAction(self._showLegendAction)
        self._plotToolBar.addAction(self._autoscaleAction)
        self._plotToolBar.addAction(self._overlayAction)
        self._plotToolBar.addAction(self._showStatePlotAction)
        self._plotToolBar.addAction(self._helpAction)
        self._plotToolBar.addAction(self._helpBiophysicsAction)
    def _showLegend(self, on):
        """Slot: show/hide the legend on every axis and redraw."""
        if on:
            for axis in (self._plotFigure.axes + self._statePlotFigure.axes):
                axis.legend().set_visible(True)
        else:
            for axis in (self._plotFigure.axes + self._statePlotFigure.axes):
                axis.legend().set_visible(False)
        self._plotCanvas.draw()
        self._statePlotCanvas.draw()
    def _autoscale(self, on):
        """Slot: toggle autoscaling; off restores the fixed axis limits."""
        if on:
            for axis in (self._plotFigure.axes + self._statePlotFigure.axes):
                axis.relim()
                axis.set_autoscale_on(True)
                axis.autoscale_view(True)
        else:
            for axis in self._plotFigure.axes:
                axis.set_autoscale_on(False)
            self._vm_axes.set_ylim(-20.0, 120.0)
            self._g_axes.set_ylim(0.0, 0.5)
            self._im_axes.set_ylim(-0.5, 0.5)
            self._i_axes.set_ylim(-10, 10)
        self._plotCanvas.draw()
        self._statePlotCanvas.draw()
    def _useDefaults(self):
        """Slot for 'Restore defaults': reset every edit box and toggle
        to the values in SquidGui.defaults."""
        self._runTimeEdit.setText('%g' % (self.defaults['runtime']))
        self._simTimeStepEdit.setText('%g' % (self.defaults['simdt']))
        self._overlayAction.setChecked(False)
        self._naConductanceToggle.setChecked(False)
        self._kConductanceToggle.setChecked(False)
        self._kOutEdit.setText('%g' % (SquidGui.defaults['K_out']))
        self._naOutEdit.setText('%g' % (SquidGui.defaults['Na_out']))
        self._kInEdit.setText('%g' % (SquidGui.defaults['K_in']))
        self._naInEdit.setText('%g' % (SquidGui.defaults['Na_in']))
        self._temperatureEdit.setText('%g' % (SquidGui.defaults['temperature'] - CELSIUS_TO_KELVIN))
        self._holdingVEdit.setText('%g' % (SquidGui.defaults['vclamp.holdingV']))
        self._holdingTimeEdit.setText('%g' % (SquidGui.defaults['vclamp.holdingT']))
        self._prePulseVEdit.setText('%g' % (SquidGui.defaults['vclamp.prepulseV']))
        self._prePulseTimeEdit.setText('%g' % (SquidGui.defaults['vclamp.prepulseT']))
        self._clampVEdit.setText('%g' % (SquidGui.defaults['vclamp.clampV']))
        self._clampTimeEdit.setText('%g' % (SquidGui.defaults['vclamp.clampT']))
        self._baseCurrentEdit.setText('%g' % (SquidGui.defaults['iclamp.baseI']))
        self._firstPulseEdit.setText('%g' % (SquidGui.defaults['iclamp.firstI']))
        self._firstDelayEdit.setText('%g' % (SquidGui.defaults['iclamp.firstD']))
        self._firstPulseWidthEdit.setText('%g' % (SquidGui.defaults['iclamp.firstT']))
        self._secondPulseEdit.setText('%g' % (SquidGui.defaults['iclamp.secondI']))
        self._secondDelayEdit.setText('%g' % (SquidGui.defaults['iclamp.secondD']))
        self._secondPulseWidthEdit.setText('%g' % (SquidGui.defaults['iclamp.secondT']))
        self._pulseMode.setCurrentIndex(0)
    def _onScroll(self, event):
        """Matplotlib scroll-event handler: zoom the axes under the cursor
        (wheel up = zoom in, wheel down = zoom out)."""
        if event.inaxes is None:
            return
        axes = event.inaxes
        zoom = 0.0
        if event.button == 'up':
            zoom = -1.0
        elif event.button == 'down':
            zoom = 1.0
        if zoom != 0.0:
            # Record the current view so the navigator's back button works.
            self._plotNavigator.push_current()
            axes.get_xaxis().zoom(zoom)
            axes.get_yaxis().zoom(zoom)
        self._plotCanvas.draw()
    def closeEvent(self, event):
        """Close all top-level windows (state plot, help) with the main one."""
        QtGui.qApp.closeAllWindows()
    def _showBioPhysicsHelp(self):
        """Slot: show the biophysics help assembled from the tooltip texts."""
        self._createHelpMessage()
        self._helpMessageText.setText('<html><p>%s</p><p>%s</p><p>%s</p><p>%s</p><p>%s</p></html>' %
                                      (tooltip_Nernst,
                                       tooltip_Erest,
                                       tooltip_KChan,
                                       tooltip_NaChan,
                                       tooltip_Im))
        self._helpWindow.setVisible(True)
    def _showRunningHelp(self):
        """Slot: show the usage help loaded from help.html."""
        self._createHelpMessage()
        self._helpMessageText.setSource(QtCore.QUrl(self._helpBaseURL))
        self._helpWindow.setVisible(True)
    def _createHelpMessage(self):
        """Lazily build the help window (text browser plus button row).

        Idempotent: returns immediately if the window already exists.
        """
        if hasattr(self, '_helpWindow'):
            return
        self._helpWindow = QtGui.QWidget()
        self._helpWindow.setWindowFlags(QtCore.Qt.Window)
        layout = QtGui.QVBoxLayout()
        self._helpWindow.setLayout(layout)
        self._helpMessageArea = QtGui.QScrollArea()
        self._helpMessageText = QtGui.QTextBrowser()
        self._helpMessageText.setOpenExternalLinks(True)
        self._helpMessageArea.setWidget(self._helpMessageText)
        layout.addWidget(self._helpMessageText)
        # help.html is expected to sit next to this script on disk.
        self._squidGuiPath = os.path.dirname(os.path.abspath(__file__))
        self._helpBaseURL = os.path.join(self._squidGuiPath,'help.html')
        self._helpMessageText.setSource(QtCore.QUrl(self._helpBaseURL))
        self._helpMessageText.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self._helpMessageArea.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self._helpMessageText.setMinimumSize(800, 600)
        self._closeHelpAction = QtGui.QAction('Close', self)
        self.connect(self._closeHelpAction, QtCore.SIGNAL('triggered()'), self._helpWindow.close)
        # Setting the close event so that the ``Help`` button is
        # unchecked when the help window is closed
        self._helpWindow.closeEvent = lambda event: self._helpAction.setChecked(False)
        self._helpTOCAction = QtGui.QAction('Help running demo', self)
        self.connect(self._helpTOCAction, QtCore.SIGNAL('triggered()'), self._jumpToHelpTOC)
        # This panel is for putting two buttons using horizontal
        # layout
        panel = QtGui.QFrame()
        panel.setFrameStyle(QtGui.QFrame.StyledPanel + QtGui.QFrame.Raised)
        layout.addWidget(panel)
        layout = QtGui.QHBoxLayout()
        panel.setLayout(layout)
        self._helpAction = QtGui.QAction('Help running', self)
        self.connect(self._helpAction, QtCore.SIGNAL('triggered()'), self._showRunningHelp)
        self._helpBiophysicsAction = QtGui.QAction('Help biophysics', self)
        self.connect(self._helpBiophysicsAction, QtCore.SIGNAL('triggered()'), self._showBioPhysicsHelp)
        self._helpTOCButton = QtGui.QToolButton()
        self._helpTOCButton.setDefaultAction(self._helpTOCAction)
        self._helpBiophysicsButton = QtGui.QToolButton()
        self._helpBiophysicsButton.setDefaultAction(self._helpBiophysicsAction)
        layout.addWidget(self._helpTOCButton)
        layout.addWidget(self._helpBiophysicsButton)
        self._closeHelpButton = QtGui.QToolButton()
        self._closeHelpButton.setDefaultAction(self._closeHelpAction)
        layout.addWidget(self._closeHelpButton)
    def _jumpToHelpTOC(self):
        """Slot: reload help.html (the table of contents) in the browser."""
        self._helpMessageText.setSource(QtCore.QUrl(self._helpBaseURL))
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main GUI,
    # and enter the event loop.
    app = QtGui.QApplication(sys.argv)
    # Old-style (PyQt4) signal/slot connection: quit the application once
    # every window has been closed.
    app.connect(app, QtCore.SIGNAL('lastWindowClosed()'), app, QtCore.SLOT('quit()'))
    QtGui.qApp = app
    squid_gui = SquidGui()
    squid_gui.show()
    # NOTE(review): debug print of the window size — presumably left over
    # from development.
    print((squid_gui.size()))
    sys.exit(app.exec_())
#
# squidgui.py ends here
|
BhallaLab/moose
|
moose-examples/squid/squid_demo.py
|
Python
|
gpl-3.0
| 44,883
|
[
"MOOSE"
] |
992799fdf8b27bb54eb76e249712a30693c4a74f1335479e7309a34a7a20b54d
|
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2008-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys
from itertools import izip
import bz2
import sqlite3
from hashlib import sha1
import base64
import anydbm
import subprocess
import tempfile
import whichdb
from . import BIN, transfer_data
import cStringIO
class UndoDB:
    """a class for performing undo operations on files

    this stores an undo/redo patch for transforming a file
    back to its original value, or forward again to its modified form

    NOTE(review): this is Python 2 code (``izip`` import at module top);
    patches are BZ2-compressed XOR deltas stored base64-encoded in SQLite,
    keyed by the SHA1 checksum and byte size of the source contents."""
    def __init__(self, filename):
        """filename is the location on disk for this undo database"""
        self.db = sqlite3.connect(filename)
        self.cursor = self.db.cursor()
        # ``patch`` holds the base64-encoded, BZ2-compressed XOR patch.
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS patch (
patch_id INTEGER PRIMARY KEY AUTOINCREMENT,
patch_data BLOB NOT NULL
)""")
        # ``source_file`` maps a (checksum, size) pair of file contents to
        # the patch transforming it into its counterpart of target_size.
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS source_file (
source_checksum CHAR(40) PRIMARY KEY,
source_size INTEGER NOT NULL,
target_size INTEGER NOT NULL,
patch_id INTEGER,
FOREIGN KEY (patch_id) REFERENCES patch (patch_id) ON DELETE CASCADE
)""")
    def close(self):
        """closes any open database handles"""
        self.cursor.close()
        self.db.close()
    @classmethod
    def build_patch(cls, s1, s2):
        """given two strings, returns a transformation patch

        this function presumes the two strings will be largely
        equal and similar in length.  It operates by performing an
        xOR operation across both and BZ2 compressing the result"""
        # Zero-pad the shorter string so the XOR below lines up
        # byte-for-byte.
        if (len(s1) < len(s2)):
            s1 += (chr(0) * (len(s2) - len(s1)))
        elif (len(s2) < len(s1)):
            s2 += (chr(0) * (len(s1) - len(s2)))
        patch = bz2.compress("".join([chr(ord(x) ^ ord(y)) for (x, y) in
                                      izip(s1, s2)]))
        return patch
    @classmethod
    def apply_patch(cls, s, patch, new_length):
        """given a string, patch and new length, restores string

        patch is the same BZ2 compressed output from build_patch()
        new_length is the size of the string originally,
        which must be stored externally from the patch itself"""
        # Truncate or zero-pad the input to the stored target length, then
        # XOR against the decompressed patch to recover the original data.
        if (len(s) > new_length):
            s = s[0:new_length]
        elif (len(s) < new_length):
            s += (chr(0) * (new_length - len(s)))
        return "".join([chr(ord(x) ^ ord(y)) for (x, y) in
                        izip(s, bz2.decompress(patch))])
    def __add__(self, file_data1, file_data2):
        # NOTE(review): despite the dunder name this is NOT the ``+``
        # operator (it takes two operands); it is an internal helper used
        # by add() below.
        #file_data1's target is file_data2 and
        #file_data2's target is file_data1
        self.cursor.execute(
            "INSERT INTO patch (patch_id, patch_data) VALUES (?, ?)",
            [None,
             base64.b64encode(UndoDB.build_patch(file_data1,
                                                 file_data2)).decode('ascii')])
        patch_id = self.cursor.lastrowid
        try:
            # One row per direction: old->new and new->old share the patch
            # (XOR is its own inverse).
            self.cursor.execute("""INSERT INTO source_file (
source_checksum, source_size, target_size, patch_id) values (?, ?, ?, ?)""",
                                [sha1(file_data1).hexdigest().decode('ascii'),
                                 len(file_data1),
                                 len(file_data2),
                                 patch_id])
            self.cursor.execute("""INSERT INTO source_file (
source_checksum, source_size, target_size, patch_id) values (?, ?, ?, ?)""",
                                [sha1(file_data2).hexdigest().decode('ascii'),
                                 len(file_data2),
                                 len(file_data1),
                                 patch_id])
            self.db.commit()
        except sqlite3.IntegrityError:
            # A file with this (checksum, size) is already tracked; keep
            # the existing entry and discard this patch.
            self.db.rollback()
    def __undo__(self, file_data):
        # Internal helper: look up the patch keyed by the exact
        # (checksum, size) of file_data and apply it, or return None.
        self.cursor.execute("""SELECT target_size, patch_data FROM
source_file, patch WHERE ((source_checksum = ?) AND
(source_size = ?) AND
(source_file.patch_id = patch.patch_id))""",
                            [sha1(file_data).hexdigest().decode('ascii'),
                             len(file_data)])
        row = self.cursor.fetchone()
        if (row is not None):
            (target_size, patch) = row
            return UndoDB.apply_patch(
                file_data,
                base64.b64decode(patch.encode('ascii')),
                target_size)
        else:
            # Unknown file contents; nothing to undo.
            return None
    def add(self, old_file, new_file):
        """adds an undo entry for transforming new_file to old_file

        both are filename strings"""
        old_f = open(old_file, 'rb')
        new_f = open(new_file, 'rb')
        try:
            self.__add__(old_f.read(), new_f.read())
        finally:
            old_f.close()
            new_f.close()
    def undo(self, new_file):
        """updates new_file to its original state,
        if present in the undo database

        returns True if undo performed, False if not"""
        new_f = open(new_file, 'rb')
        try:
            old_data = self.__undo__(new_f.read())
        finally:
            new_f.close()
        if (old_data is not None):
            # Overwrite the file in place with its restored contents.
            old_f = open(new_file, 'wb')
            old_f.write(old_data)
            old_f.close()
            return True
        else:
            return False
class OldUndoDB:
    """a class for performing legacy undo operations on files

    this implementation is based on xdelta and requires it to be
    installed to function
    """
    def __init__(self, filename):
        """filename is the location on disk for this undo database"""
        # anydbm (Python 2) picks an available dbm backend; 'c' creates
        # the database if it does not already exist.
        self.db = anydbm.open(filename, 'c')
    def close(self):
        """closes any open database handles"""
        self.db.close()
    @classmethod
    def checksum(cls, filename):
        """returns the SHA1 checksum of the filename's contents"""
        f = open(filename, "rb")
        c = sha1("")
        try:
            # Stream the file through the hash rather than reading it
            # whole into memory.
            transfer_data(f.read, c.update)
            return c.hexdigest()
        finally:
            f.close()
    def add(self, old_file, new_file):
        """adds an undo entry for transforming new_file to old_file

        both are filename strings"""
        #perform xdelta between old and new track to temporary file
        delta_f = tempfile.NamedTemporaryFile(suffix=".delta")
        try:
            # NOTE(review): "!= 2" treats exit statuses 0 and 1 as
            # success — presumably xdelta exits 1 when the inputs differ;
            # confirm against the installed xdelta's exit codes.
            if (subprocess.call([BIN["xdelta"],
                                 "delta",
                                 new_file, old_file, delta_f.name]) != 2):
                #store the xdelta in our internal db
                f = open(delta_f.name, 'rb')
                data = cStringIO.StringIO()
                transfer_data(f.read, data.write)
                f.close()
                self.db[OldUndoDB.checksum(new_file)] = data.getvalue()
            else:
                raise IOError("error performing xdelta operation")
        finally:
            delta_f.close()
    def undo(self, new_file):
        """updates new_file to its original state,
        if present in the undo database"""
        undo_checksum = OldUndoDB.checksum(new_file)
        # NOTE(review): ``in self.db.keys()`` builds the full key list on
        # every call; a plain membership test on the db would avoid that.
        if (undo_checksum in self.db.keys()):
            #copy the xdelta to a temporary file
            xdelta_f = tempfile.NamedTemporaryFile(suffix=".delta")
            xdelta_f.write(self.db[undo_checksum])
            xdelta_f.flush()
            #patch the existing track to a temporary track
            old_track = tempfile.NamedTemporaryFile()
            try:
                if (subprocess.call([BIN["xdelta"],
                                     "patch",
                                     xdelta_f.name,
                                     new_file,
                                     old_track.name]) == 0):
                    #copy the temporary track over the existing file
                    f1 = open(old_track.name, 'rb')
                    f2 = open(new_file, 'wb')
                    transfer_data(f1.read, f2.write)
                    f1.close()
                    f2.close()
                    return True
                else:
                    raise IOError("error performing xdelta operation")
            finally:
                old_track.close()
                xdelta_f.close()
        else:
            return False
def open_db(filename):
    """Return an undo database object for *filename*.

    The legacy xdelta-backed OldUndoDB is used only when the xdelta
    binary can be executed *and* the file is recognized as an existing
    dbm database; in every other case the SQLite-backed UndoDB is used
    (including when the file does not yet exist)."""
    if not BIN.can_execute(BIN["xdelta"]):
        return UndoDB(filename)
    db_format = whichdb.whichdb(filename)
    # whichdb returns None (unknown file) or '' (unrecognized format);
    # either way fall back to the modern implementation.
    if db_format:
        return OldUndoDB(filename)
    return UndoDB(filename)
|
Excito/audiotools
|
audiotools/delta.py
|
Python
|
gpl-2.0
| 9,448
|
[
"Brian"
] |
5933107cef1a897b7cd09e09ecb11c7f152e2334c4cc09828afa4335ecd4bc3a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Much of this code adapted from dynesty.plotting (Josh Speagle)
import numpy as np
from scipy.ndimage import gaussian_filter as norm_kde
try:
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.ticker import ScalarFormatter
from matplotlib.colors import LinearSegmentedColormap, colorConverter
except(ImportError):
pass
__all__ = ["allcorner", "show_extras", "prettify_axes", "corner",
"twodhist", "marginal", "scatter",
"get_spans", "quantile", "_quantile", "get_cmap"]
def allcorner(samples, labels, axes, weights=None, span=None,
              smooth=0.02, color="grey", qcolor=None, show_titles=False,
              hist_kwargs=None, hist2d_kwargs=None, max_n_ticks=3,
              label_kwargs=None, tick_kwargs=None,
              psamples=None, samples_kwargs=None):
    """
    Make a pretty corner plot from (weighted) posterior samples, with KDE
    smoothing.  Adapted from dynesty.plotting.

    Parameters
    ----------
    samples : ndarray of shape (ndim, nsamples)
        The samples of the posterior to plot.
    labels : iterable of strings, with shape (ndim,)
        The labels for each dimension.
    axes : ndarray of shape (ndim, ndim)
        A 2-d array of matplotlib.pyplot.axes objects, into which the
        marginal and joint posteriors will be plotted.
    weights : ndarray of shape (nsamples,), optional
        The weights associated with each sample. If omitted, all samples
        are assumed to have the same weight.
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is
        provided, the bounds are chosen to be equal-tailed.
        Default is `0.999999426697` (5-sigma credible interval).
    smooth : float or iterable with shape (ndim,), optional
        The standard deviation (single value or one per subplot) of the
        Gaussian kernel used to smooth the 1-D and 2-D marginalized
        posteriors, expressed as a fraction of the span. Default `0.02`.
        An integer value instead selects a plain (weighted) histogram
        with `bins=smooth`.
    color : str or iterable with shape (ndim,), optional
        A matplotlib-style color used when plotting the histograms.
    qcolor : str or None
        If not None, plot quantiles on the marginal plots as dashed
        lines with this color.
    show_titles : bool, default=False, optional
        If True, show titles above each marginal giving median +/- numbers.
    hist_kwargs : dict, optional
        Extra keyword arguments for the 1-D (smoothed) histograms.
        Defaults to ``{"alpha": 0.5, "histtype": "stepfilled"}``.
    hist2d_kwargs : dict, optional
        Extra keyword arguments for the 2-D (smoothed) histograms.
    max_n_ticks : see `prettify_axes`
    label_kwargs : see `prettify_axes`; defaults to ``{"fontsize": 12}``.
    tick_kwargs : see `prettify_axes`; defaults to ``{"labelsize": 8}``.
    psamples : ndarray, optional
        Extra points to overplot on the joint panels (see `scatter`).
    samples_kwargs : dict, optional
        Keyword arguments used when plotting ``psamples``.
        Defaults to ``{"marker": "o", "color": "k"}``.
    """
    # Mutable kwargs default to None in the signature (rather than shared
    # dict literals) so state can never leak between calls if a callee
    # ever mutates them.
    if hist_kwargs is None:
        hist_kwargs = {"alpha": 0.5, "histtype": "stepfilled"}
    if hist2d_kwargs is None:
        hist2d_kwargs = {}
    if label_kwargs is None:
        label_kwargs = {"fontsize": 12}
    if tick_kwargs is None:
        tick_kwargs = {"labelsize": 8}
    if samples_kwargs is None:
        samples_kwargs = {"marker": "o", "color": "k"}
    axes = corner(samples, axes, weights=weights, span=span,
                  smooth=smooth, color=color,
                  hist_kwargs=hist_kwargs, hist2d_kwargs=hist2d_kwargs)
    prettify_axes(axes, labels, max_n_ticks=max_n_ticks,
                  label_kwargs=label_kwargs, tick_kwargs=tick_kwargs)
    if psamples is not None:
        scatter(psamples, axes, zorder=10, **samples_kwargs)
    # Logical `or` instead of bitwise `|`: short-circuits and works for
    # any truthy value of show_titles.
    if (qcolor is not None) or show_titles:
        show_extras(samples, labels, axes, weights=weights,
                    qcolor=qcolor, show_titles=show_titles)
    return axes
def show_extras(samples, labels, paxes, weights=None,
                quantiles=(0.16, 0.5, 0.84), qcolor="k",
                truths=None, show_titles=False, title_fmt=".2f",
                truth_kwargs=None, title_kwargs=None):
    """Plot quantiles and truths as horizontal & vertical lines on an
    existing cornerplot.

    Parameters
    ----------
    samples : ndarray of shape (ndim, nsamples)
        Posterior samples, one row per parameter.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter, used in the panel titles.
    paxes : ndarray of shape (ndim, ndim)
        The corner-plot axes to decorate.
    weights : ndarray of shape (nsamples,), optional
        Per-sample weights passed through to the quantile computation.
    quantiles : iterable, optional
        Fractional quantiles to overplot on the 1-D marginalized
        posteriors as vertical dashed lines. Default `(0.16, 0.5, 0.84)`
        (spanning the 68%/1-sigma credible interval).
    qcolor : str or None
        Color for the quantile lines; None suppresses them.
    truths : iterable with shape (ndim,), optional
        Per-parameter truth value(s); each entry may be a scalar, an
        iterable of values, or None.
    show_titles : bool
        If True, set a "label = median +err/-err" title on each marginal.
    title_fmt : str
        Python format spec used for the numbers in the titles.
    """
    if truth_kwargs is None:
        truth_kwargs = {}
    if title_kwargs is None:
        title_kwargs = {}
    for i, xx in enumerate(samples):
        x = xx.flatten()
        ax = paxes[i, i]
        # Plot quantiles on the diagonal (marginal) panel.
        if (qcolor is not None) and len(quantiles) > 0:
            qs = _quantile(x, quantiles, weights=weights)
            for q in qs:
                ax.axvline(q, lw=2, ls="dashed", color=qcolor)
        # Add truth value(s); truths[i] may be a scalar or an iterable,
        # so a TypeError from iterating signals the scalar case.
        if truths is not None and truths[i] is not None:
            try:
                [ax.axvline(t, **truth_kwargs)
                 for t in truths[i]]
            except TypeError:
                ax.axvline(truths[i], **truth_kwargs)
        # Set titles.
        if show_titles:
            title = None
            if title_fmt is not None:
                ql, qm, qh = _quantile(x, [0.16, 0.5, 0.84], weights=weights)
                q_minus, q_plus = qm - ql, qh - qm
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(qm), fmt(q_minus), fmt(q_plus))
                title = "{0} = {1}".format(labels[i], title)
            ax.set_title(title, **title_kwargs)
        for j, yy in enumerate(samples[:i]):
            if j >= i:
                continue
            # BUG FIX: draw on the off-diagonal joint panel; previously
            # `ax` still pointed at the diagonal axis from the loop above,
            # so truth lines landed on the wrong panel.
            ax = paxes[i, j]
            # Add truth values (vertical for dimension j, horizontal for i).
            if truths is not None:
                if truths[j] is not None:
                    try:
                        [ax.axvline(t, **truth_kwargs)
                         for t in truths[j]]
                    except TypeError:
                        ax.axvline(truths[j], **truth_kwargs)
                if truths[i] is not None:
                    try:
                        [ax.axhline(t, **truth_kwargs)
                         for t in truths[i]]
                    except TypeError:
                        ax.axhline(truths[i], **truth_kwargs)
def prettify_axes(paxes, labels=None, label_kwargs={}, tick_kwargs={},
                  max_n_ticks=3, top_ticks=False, use_math_text=True):
    """Set up cornerplot axis labels and ticks to look nice.

    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will follow :math:`x_i` style.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `3`.
    top_ticks : bool, optional
        Whether to label the top (rather than bottom) ticks. Default is
        `False`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `True`.
    """
    ndim = len(paxes)
    # --- Diagonal (marginal) panels: x ticks only, no y axis ---
    for i in range(ndim):
        ax = paxes[i, i]
        # Setup axes
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(NullLocator())
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.xaxis.set_major_formatter(sf)
        if i < ndim - 1:
            # Interior rows: hide x labels unless top_ticks is requested.
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            # Bottom-right panel carries the last parameter's x label.
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            ax.set_xlabel(labels[i], **label_kwargs)
            ax.xaxis.set_label_coords(0.5, -0.3)
        ax.tick_params(axis='both', which='major', **tick_kwargs)
        # --- Off-diagonal panels of row i ---
        for j in range(ndim):
            ax = paxes[i, j]
            if j > i:
                # Upper triangle is unused: blank it out entirely.
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            if j == i:
                continue
            if max_n_ticks == 0:
                ax.xaxis.set_major_locator(NullLocator())
                ax.yaxis.set_major_locator(NullLocator())
            else:
                ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            # Label axes.
            sf = ScalarFormatter(useMathText=use_math_text)
            ax.xaxis.set_major_formatter(sf)
            ax.yaxis.set_major_formatter(sf)
            if i < ndim - 1:
                ax.set_xticklabels([])
            else:
                # Bottom row: x labels for each column's parameter.
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                ax.set_xlabel(labels[j], **label_kwargs)
                ax.xaxis.set_label_coords(0.5, -0.3)
            if j > 0:
                ax.set_yticklabels([])
            else:
                # Leftmost column: y labels for each row's parameter.
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                ax.set_ylabel(labels[i], **label_kwargs)
                ax.yaxis.set_label_coords(-0.3, 0.5)
            ax.tick_params(axis='both', which='major', **tick_kwargs)
def corner(samples, paxes, weights=None, span=None, smooth=0.02,
           color='black', hist_kwargs=None, hist2d_kwargs=None):
    """Make a smoothed cornerplot.

    :param samples: `~numpy.ndarray` of shape (ndim, nsample)
        The samples from which to construct histograms.
    :param paxes: ndarray of pyplot.Axes of shape (ndim, ndim)
        Axes into which to plot the histograms.
    :param weights: ndarray of shape (nsample,), optional
        Weights associated with each sample.
    :param span: iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is
        provided, the bounds are chosen to be equal-tailed.
        Default is `0.999999426697` (5-sigma credible interval).
    :param smooth: float or iterable with shape (ndim,), optional
        The standard deviation (single value or one per subplot) of the
        Gaussian kernel used to smooth the 1-D and 2-D marginalized
        posteriors, expressed as a fraction of the span. Default `0.02`.
        An integer value instead selects a plain (weighted) histogram
        with `bins=smooth`.
    :param color: str or iterable with shape (ndim,), optional
        A matplotlib-style color used when plotting the histograms.
        Default `'black'`.
    :param hist_kwargs: dict, optional
        Extra keyword arguments for the 1-D (smoothed) histograms.
    :param hist2d_kwargs: dict, optional
        Extra keyword arguments for the 2-D (smoothed) histograms.
    :returns paxes:
    """
    # None-defaults instead of shared mutable dict literals.
    if hist_kwargs is None:
        hist_kwargs = {}
    if hist2d_kwargs is None:
        hist2d_kwargs = {}
    assert samples.ndim > 1
    # Sanity check: more samples than dimensions.  np.prod replaces
    # np.product, which was deprecated and removed in NumPy 2.0.
    assert np.prod(samples.shape[1:]) > samples.shape[0]
    ndim = len(samples)
    # Determine plotting bounds.
    span = get_spans(span, samples, weights=weights)
    # Broadcast the smoothing parameter to one value per dimension.
    smooth = np.zeros(ndim) + smooth
    # --- Now actually do the plotting-------
    for i, xx in enumerate(samples):
        x = xx.flatten()
        sx = smooth[i]
        # ---- Diagonal axes: 1-D marginal -----
        ax = paxes[i, i]
        marginal(x, ax, weights=weights, span=span[i], smooth=sx,
                 color=color, **hist_kwargs)
        # --- Off-diagonal (lower-triangle) axes: 2-D joint ----
        for j, yy in enumerate(samples):
            y = yy.flatten()
            ax = paxes[i, j]
            if j >= i:
                continue
            sy = smooth[j]
            twodhist(y, x, weights=weights, ax=ax,
                     span=[span[j], span[i]], smooth=[sy, sx],
                     color=color, **hist2d_kwargs)
    return paxes
def twodhist(x, y, ax=None, span=None, weights=None,
             smooth=0.02, levels=None, color='gray',
             plot_density=False, plot_contours=True, fill_contours=True,
             contour_kwargs={}, contourf_kwargs={}, **kwargs):
    """Function called by :meth:`cornerplot` used to generate a 2-D histogram
    or contour of samples.

    Parameters
    ----------
    x : iterable with shape (nsamps,)
        Sample positions in the first dimension.
    y : iterable with shape (nsamps,)
        Sample positions in the second dimension.
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::
            span = [(0., 10.), 0.95, (5., 6.)]
        Default is `0.999999426697` (5-sigma credible interval).
    weights : iterable with shape (nsamps,)
        Weights associated with the samples. Default is `None` (no weights).
    levels : iterable, optional
        The contour levels to draw. Default are `[0.5, 1, 1.5, 2]`-sigma.
    ax : `~matplotlib.axes.Axes`, optional
        An `~matplotlib.axes.axes` instance on which to add the 2-D histogram.
        If not provided, a figure will be generated.
    color : str, optional
        The `~matplotlib`-style color used to draw lines and color cells
        and contours. Default is `'gray'`.
    plot_density : bool, optional
        Whether to draw the density colormap. Default is `False`.
    plot_contours : bool, optional
        Whether to draw the contours. Default is `True`.
    fill_contours : bool, optional
        Whether to fill the contours. Default is `True`.
    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.
    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.
    """
    # Determine plotting bounds.
    span = get_spans(span, [x, y], weights=weights)
    # Setting up smoothing.
    smooth = np.zeros(2) + smooth
    # --- Now actually do the plotting-------
    # The default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    contour_cmap = get_cmap(color, levels)
    # Initialize smoothing.
    # NOTE(review): duplicates the broadcast a few lines above; harmless
    # but redundant.
    smooth = np.zeros(2) + np.array(smooth)
    bins = []
    svalues = []
    for s in smooth:
        if s > 1.0:
            # If `s` > 1.0, the weighted histogram has
            # `s` bins within the provided bounds.
            bins.append(int(s))
            svalues.append(0.)
        else:
            # If `s` < 1, oversample the data relative to the
            # smoothing filter by a factor of 2, then use a Gaussian
            # filter to smooth the results.
            bins.append(int(round(2. / s)))
            svalues.append(2.)
    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
                                 range=list(map(np.sort, span)),
                                 weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range.")
    # Smooth the results.
    if not np.all(svalues == 0.):
        H = norm_kde(H*1.0, svalues)
    # Compute the density levels: find the histogram heights enclosing
    # each requested fraction of the total mass.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except:
            # NOTE(review): bare except — presumably guarding the
            # IndexError when no bin is below the requested level.
            V[i] = Hflat[0]
    V.sort()
    m = (np.diff(V) == 0)
    if np.any(m) and plot_contours:
        print("Too few points to create valid contours.")
    # Nudge duplicate levels apart so contouring does not fail.
    while np.any(m):
        V[np.where(m)[0][0]] *= 1.0 - 1e-4
        m = (np.diff(V) == 0)
    V.sort()
    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
    # Extend the array for the sake of the contours at the plot edges.
    H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
    H2[2:-2, 2:-2] = H
    H2[2:-2, 1] = H[:, 0]
    H2[2:-2, -2] = H[:, -1]
    H2[1, 2:-2] = H[0]
    H2[-2, 2:-2] = H[-1]
    H2[1, 1] = H[0, 0]
    H2[1, -2] = H[0, -1]
    H2[-2, 1] = H[-1, 0]
    H2[-2, -2] = H[-1, -1]
    X2 = np.concatenate([X1[0] + np.array([-2, -1]) * np.diff(X1[:2]), X1,
                         X1[-1] + np.array([1, 2]) * np.diff(X1[-2:])])
    Y2 = np.concatenate([Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]), Y1,
                         Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:])])
    clevels = np.concatenate([[0], V, [H.max() * (1 + 1e-4)]])
    # plot contour fills
    if plot_contours and fill_contours and (ax is not None):
        cfk = {}
        cfk["colors"] = contour_cmap
        cfk["antialiased"] = False
        cfk.update(contourf_kwargs)
        ax.contourf(X2, Y2, H2.T, clevels, **cfk)
    # Plot the contour edge colors.
    if plot_contours and (ax is not None):
        ck = {}
        ck["colors"] = color
        ck.update(contour_kwargs)
        ax.contour(X2, Y2, H2.T, V, **ck)
    return X2, Y2, H2.T, V, clevels, ax
def marginal(x, ax=None, weights=None, span=None, smooth=0.02,
             color='black', peak=None, **hist_kwargs):
    """Plot a 1-D marginalized (weighted) histogram of ``x`` on ``ax``,
    optionally Gaussian-smoothed, and set the axis limits to match."""
    if span is None:
        span = get_spans(span, np.atleast_2d(x), weights=weights)[0]
    ax.set_xlim(span)
    srange = np.sort(span)
    # Build the distribution to histogram.
    if smooth > 1:
        # Integer-like smooth: a plain weighted histogram with that many bins.
        data, nbins, hist_weights = x, int(round(smooth)), weights
    else:
        # Fractional smooth: oversample by 10x relative to the filter,
        # then Gaussian-smooth the binned counts.
        counts, edges = np.histogram(x, bins=int(round(10. / smooth)),
                                     weights=weights, range=srange)
        counts = norm_kde(counts * 1.0, 10.)
        centers = 0.5 * (edges[1:] + edges[:-1])
        data, nbins, hist_weights = centers, edges, counts
    raw_counts, _ = np.histogram(data, bins=nbins, weights=hist_weights,
                                 range=srange)
    if peak is not None:
        # Rescale so the tallest bin reaches the requested peak height.
        hist_weights = hist_weights * peak / raw_counts.max()
    heights, _, _ = ax.hist(data, bins=nbins, weights=hist_weights,
                            range=srange, color=color, **hist_kwargs)
    ax.set_ylim([0., max(heights) * 1.05])
def scatter(samples, paxes, **scatter_kwargs):
    """Overplot the given points on the lower-triangle (joint) panels of
    an existing cornerplot.

    ``samples`` has shape (ndim, npoints); panel (row, col) receives the
    col-th dimension on x and the row-th dimension on y."""
    assert samples.ndim > 1
    ndim = len(samples)
    for row in range(ndim):
        xvals = samples[row].flatten()
        for col in range(row):
            yvals = samples[col].flatten()
            paxes[row, col].scatter(yvals, xvals, **scatter_kwargs)
def get_spans(span, samples, weights=None):
    """Get plotting ranges from percentiles of samples.

    Parameters
    ----------
    span : None, or iterable with shape (ndim,)
        Each element is either an explicit (xmin, xmax) pair, or a float
        in `(0., 1.]` giving the fraction of (weighted) samples to
        enclose (equal-tailed).  None selects the 5-sigma default
        `0.999999426697` for every dimension.
    samples : iterable of arrays
        A sequence of sample arrays, one per parameter.

    Returns
    -------
    span : list
        One (xmin, xmax) range per parameter.
    """
    ndim = len(samples)
    if span is None:
        span = [0.999999426697] * ndim
    spans = list(span)
    if len(spans) != ndim:
        raise ValueError("Dimension mismatch between samples and span.")
    for i, entry in enumerate(spans):
        try:
            lo, hi = entry  # already an explicit (xmin, xmax) pair
        except TypeError:
            # A bare float: convert to an equal-tailed quantile pair.
            q = [0.5 - 0.5 * entry, 0.5 + 0.5 * entry]
            spans[i] = _quantile(samples[i], q, weights=weights)
    return spans
def quantile(xarr, q, weights=None):
    """Compute (weighted) quantiles for each row of a sample array.

    :param xarr: `~numpy.ndarray` with shape (nvar, nsamples)
        The input array to compute quantiles of.
    :param q: list of quantiles, from [0., 1.]
    :param weights: shape (nsamples,), optional
    :returns quants: ndarray of shape (nvar, nq)
        The quantiles of each variable.
    """
    rows = []
    for row in xarr:
        rows.append(_quantile(row, q, weights=weights))
    return np.array(rows)
def _quantile(x, q, weights=None):
"""Compute (weighted) quantiles from an input set of samples.
Parameters
----------
x : `~numpy.ndarray` with shape (nsamps,)
Input samples.
q : `~numpy.ndarray` with shape (nquantiles,)
The list of quantiles to compute from `[0., 1.]`.
weights : `~numpy.ndarray` with shape (nsamps,), optional
The associated weight from each sample.
Returns
-------
quantiles : `~numpy.ndarray` with shape (nquantiles,)
The weighted sample quantiles computed at `q`.
"""
# Initial check.
x = np.atleast_1d(x)
q = np.atleast_1d(q)
# Quantile check.
if np.any(q < 0.0) or np.any(q > 1.0):
raise ValueError("Quantiles must be between 0. and 1.")
if weights is None:
# If no weights provided, this simply calls `np.percentile`.
return np.percentile(x, list(100.0 * q))
else:
# If weights are provided, compute the weighted quantiles.
weights = np.atleast_1d(weights)
if len(x) != len(weights):
raise ValueError("Dimension mismatch: len(weights) != len(x).")
idx = np.argsort(x) # sort samples
sw = weights[idx] # sort weights
cdf = np.cumsum(sw)[:-1] # compute CDF
cdf /= cdf[-1] # normalize CDF
cdf = np.append(0, cdf) # ensure proper span
quantiles = np.interp(q, cdf, x[idx]).tolist()
return quantiles
def get_cmap(color, levels):
    """Build a list of RGBA colors (one per contour level, plus one) that
    fades the base color's alpha from fully transparent up toward opaque,
    for use as filled-contour colors."""
    from matplotlib.colors import colorConverter
    base_rgba = colorConverter.to_rgba(color)
    n_levels = len(levels)
    cmap = [list(base_rgba) for _ in range(n_levels + 1)]
    for idx in range(n_levels + 1):
        cmap[idx][-1] *= float(idx) / (n_levels + 1)
    return cmap
def demo(ndim=3, nsample=int(1e4)):
    """Draw random Normal posterior samples and render a demonstration
    cornerplot, returning the axes array."""
    from prospect.models.priors import Normal
    mu = np.random.uniform(-3, 3, size=(ndim,))
    sd = np.random.uniform(1, 5, size=(ndim,))
    names = ["x{}".format(i) for i in range(ndim)]
    prior = Normal(mean=mu, sigma=sd)
    draws = np.array([prior.sample() for _ in range(nsample)]).T
    print(draws.shape)
    print(mu)
    print(sd)
    fig, axes = pl.subplots(ndim, ndim)
    # Overplot the true means as points on the joint panels.
    axes = allcorner(draws, names, axes, show_titles=True,
                     psamples=mu[:, None])
    pl.show()
    return axes
|
bd-j/prospector
|
prospect/plotting/corner.py
|
Python
|
mit
| 23,308
|
[
"Gaussian"
] |
a0147c209b6f59fa4157ffd8b9584078405d35804deb930db8c1512959349476
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Interface with command line GULP.
http://projects.ivec.org
WARNING: you need to have GULP installed on your system.
"""
__author__ = "Bharat Medasani, Wenhao Sun"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Bharat Medasani"
__email__ = "bkmedasani@lbl.gov,wenhao@mit.edu"
__status__ = "Production"
__date__ = "$Jun 22, 2013M$"
import os
import re
import subprocess
from monty.tempfile import ScratchDir
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# Ion classification used elsewhere in this module (see
# GulpIO.structure_lines) to decide which species get polarizable
# "shel" lines in the core-shell model.
_anions = set(map(Element, ["O", "S", "F", "Cl", "Br", "N", "P"]))
_cations = set(
    map(
        Element,
        [
            "Li",
            "Na",
            "K",  # alkali metals
            "Be",
            "Mg",
            "Ca",  # alkaline metals
            "Al",
            "Sc",
            "Ti",
            "V",
            "Cr",
            "Mn",
            "Fe",
            "Co",
            "Ni",
            "Cu",
            "Zn",
            "Ge",
            "As",
            "Y",
            "Zr",
            "Nb",
            "Mo",
            "Tc",
            "Ru",
            "Rh",
            "Pd",
            "Ag",
            "Cd",
            "In",
            "Sn",
            "Sb",
            "Hf",
            "Ta",
            "W",
            "Re",
            "Os",
            "Ir",
            "Pt",
            "Au",
            "Hg",
            "Tl",
            "Pb",
            "Bi",
            "La",
            "Ce",
            "Pr",
            "Nd",
            "Pm",
            "Sm",
            "Eu",
            "Gd",
            "Tb",
            "Dy",
            "Ho",
            "Er",
            "Tm",
            "Yb",
            "Lu",
        ],
    )
)
# Recognized GULP first-line keywords, grouped by purpose.
# NOTE(review): a few entries ("full", "hexagonal", "nofirst_point")
# appear in more than one group below; harmless in a set literal.
_gulp_kw = {
    # Control of calculation type
    "angle",
    "bond",
    "cosmo",
    "cosmic",
    "cost",
    "defect",
    "distance",
    "eem",
    "efg",
    "fit",
    "free_energy",
    "gasteiger",
    "genetic",
    "gradients",
    "md",
    "montecarlo",
    "noautobond",
    "noenergy",
    "optimise",
    "pot",
    "predict",
    "preserve_Q",
    "property",
    "phonon",
    "qeq",
    "qbond",
    "single",
    "sm",
    "static_first",
    "torsion",
    "transition_state",
    # Geometric variable specification
    "breathe",
    "bulk_noopt",
    "cellonly",
    "conp",
    "conv",
    "isotropic",
    "orthorhombic",
    "nobreathe",
    "noflgs",
    "shell",
    "unfix",
    # Algorithm
    "c6",
    "dipole",
    "fbfgs",
    "fix_molecule",
    "full",
    "hill",
    "kfull",
    "marvinSE",
    "madelung",
    "minimum_image",
    "molecule",
    "molmec",
    "molq",
    "newda",
    "noanisotropic_2b",
    "nod2sym",
    "nodsymmetry",
    "noelectrostatics",
    "noexclude",
    "nofcentral",
    "nofirst_point",
    "noksymmetry",
    "nolist_md",
    "nomcediff",
    "nonanal",
    "noquicksearch",
    "noreal",
    "norecip",
    "norepulsive",
    "nosasinitevery",
    "nosderv",
    "nozeropt",
    "numerical",
    "qiter",
    "qok",
    "spatial",
    "storevectors",
    "nomolecularinternalke",
    "voight",
    "zsisa",
    # Optimisation method
    "conjugate",
    "dfp",
    "lbfgs",
    "numdiag",
    "positive",
    "rfo",
    "unit",
    # Output control
    "average",
    "broaden_dos",
    "cartesian",
    "compare",
    "conserved",
    "dcharge",
    "dynamical_matrix",
    "eigenvectors",
    "global",
    "hessian",
    "hexagonal",
    "intensity",
    "linmin",
    "meanke",
    "nodensity_out",
    "nodpsym",
    "nofirst_point",
    "nofrequency",
    "nokpoints",
    "operators",
    "outcon",
    "prt_eam",
    "prt_two",
    "prt_regi_before",
    "qsas",
    "restore",
    "save",
    "terse",
    # Structure control
    "full",
    "hexagonal",
    "lower_symmetry",
    "nosymmetry",
    # PDF control
    "PDF",
    "PDFcut",
    "PDFbelow",
    "PDFkeep",
    "coreinfo",
    "nowidth",
    "nopartial",
    # Miscellaneous
    "nomodcoord",
    "oldunits",
    "zero_potential",
}
class GulpIO:
"""
To generate GULP input and process output
"""
@staticmethod
def keyword_line(*args):
"""
Checks if the input args are proper gulp keywords and
generates the 1st line of gulp input. Full keywords are expected.
Args:
args: 1st line keywords
"""
# if len(list(filter(lambda x: x in _gulp_kw, args))) != len(args):
# raise GulpError("Wrong keywords given")
gin = " ".join(args)
gin += "\n"
return gin
@staticmethod
def structure_lines(
structure,
cell_flg=True,
frac_flg=True,
anion_shell_flg=True,
cation_shell_flg=False,
symm_flg=True,
):
"""
Generates GULP input string corresponding to pymatgen structure.
Args:
structure: pymatgen Structure object
cell_flg (default = True): Option to use lattice parameters.
fractional_flg (default = True): If True, fractional coordinates
are used. Else, cartesian coordinates in Angstroms are used.
******
GULP convention is to use fractional coordinates for periodic
structures and cartesian coordinates for non-periodic
structures.
******
anion_shell_flg (default = True): If True, anions are considered
polarizable.
cation_shell_flg (default = False): If True, cations are
considered polarizable.
symm_flg (default = True): If True, symmetry information is also
written.
Returns:
string containing structure for GULP input
"""
gin = ""
if cell_flg:
gin += "cell\n"
l = structure.lattice
lat_str = f"{l.a:6f} {l.b:6f} {l.c:6f} {l.alpha:6f} {l.beta:6f} {l.gamma:6f}"
gin += lat_str + "\n"
if frac_flg:
gin += "frac\n"
coord_attr = "frac_coords"
else:
gin += "cart\n"
coord_attr = "coords"
for site in structure.sites:
coord = [str(i) for i in getattr(site, coord_attr)]
specie = site.specie
core_site_desc = specie.symbol + " core " + " ".join(coord) + "\n"
gin += core_site_desc
if (specie in _anions and anion_shell_flg) or (specie in _cations and cation_shell_flg):
shel_site_desc = specie.symbol + " shel " + " ".join(coord) + "\n"
gin += shel_site_desc
else:
pass
if symm_flg:
gin += "space\n"
gin += str(SpacegroupAnalyzer(structure).get_space_group_number()) + "\n"
return gin
@staticmethod
def specie_potential_lines(structure, potential, **kwargs):
"""
Generates GULP input specie and potential string for pymatgen
structure.
Args:
structure: pymatgen.core.structure.Structure object
potential: String specifying the type of potential used
kwargs: Additional parameters related to potential. For
potential == "buckingham",
anion_shell_flg (default = False):
If True, anions are considered polarizable.
anion_core_chrg=float
anion_shell_chrg=float
cation_shell_flg (default = False):
If True, cations are considered polarizable.
cation_core_chrg=float
cation_shell_chrg=float
Returns:
string containing specie and potential specification for gulp
input.
"""
raise NotImplementedError("gulp_specie_potential not yet implemented.\nUse library_line instead")
@staticmethod
def library_line(file_name):
"""
Specifies GULP library file to read species and potential parameters.
If using library don't specify species and potential
in the input file and vice versa. Make sure the elements of
structure are in the library file.
Args:
file_name: Name of GULP library file
Returns:
GULP input string specifying library option
"""
gulplib_set = "GULP_LIB" in os.environ.keys()
def readable(f):
return os.path.isfile(f) and os.access(f, os.R_OK)
gin = ""
dirpath, fname = os.path.split(file_name)
if dirpath and readable(file_name): # Full path specified
gin = "library " + file_name
else:
fpath = os.path.join(os.getcwd(), file_name) # Check current dir
if readable(fpath):
gin = "library " + fpath
elif gulplib_set: # Check the GULP_LIB path
fpath = os.path.join(os.environ["GULP_LIB"], file_name)
if readable(fpath):
gin = "library " + file_name
if gin:
return gin + "\n"
raise GulpError("GULP Library not found")
def buckingham_input(self, structure, keywords, library=None, uc=True, valence_dict=None):
    """
    Assemble a complete GULP input for an oxide structure using a
    Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        keywords: GULP first line keywords.
        library (Default=None): File containing the species and potential.
            When omitted, potential lines are generated from the default
            libraries instead.
        uc (Default=True): Unit Cell Flag; when True, symmetry lines are
            omitted from the structure section.
        valence_dict: {El: valence}
    """
    gin = self.keyword_line(*keywords) + self.structure_lines(structure, symm_flg=not uc)
    if library:
        gin += self.library_line(library)
    else:
        gin += self.buckingham_potential(structure, valence_dict)
    return gin
@staticmethod
def buckingham_potential(structure, val_dict=None):
    """
    Generate species, buckingham, and spring options for an oxide structure
    using the parameters in default libraries.

    Ref:
        1. G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys.,
           18, 1149-1161 (1985)
        2. T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle,
           J. Mater Chem., 4, 831-837 (1994)

    Args:
        structure: pymatgen.core.structure.Structure
        val_dict (Needed if structure is not charge neutral): {El:valence}
            dict, where El is element.
    """
    if not val_dict:
        try:
            # If structure is oxidation state decorated, use that first.
            el = [site.specie.symbol for site in structure]
            valences = [site.specie.oxi_state for site in structure]
            val_dict = dict(zip(el, valences))
        except AttributeError:
            # Undecorated species have no oxi_state; fall back to
            # bond-valence analysis to assign valences.
            bv = BVAnalyzer()
            el = [site.specie.symbol for site in structure]
            valences = bv.get_valences(structure)
            val_dict = dict(zip(el, valences))
    # Try bush library first; elements missing there fall through to lewis.
    bpb = BuckinghamPotential("bush")
    bpl = BuckinghamPotential("lewis")
    gin = ""
    for key in val_dict.keys():
        use_bush = True
        # Strip digits and charge signs from the key, e.g. "Fe2+" -> "Fe".
        el = re.sub(r"[1-9,+,\-]", "", key)
        if el not in bpb.species_dict.keys():
            use_bush = False
        elif val_dict[key] != bpb.species_dict[el]["oxi"]:
            # Bush parameters only apply at the library's oxidation state.
            use_bush = False
        if use_bush:
            gin += "species \n"
            gin += bpb.species_dict[el]["inp_str"]
            gin += "buckingham \n"
            gin += bpb.pot_dict[el]
            gin += "spring \n"
            gin += bpb.spring_dict[el]
            continue
        # Try lewis library next if element is not in bush
        # use_lewis = True
        if el != "O":  # For metals the key is "Metal_OxiState+"
            k = el + "_" + str(int(val_dict[key])) + "+"
            if k not in bpl.species_dict.keys():
                # use_lewis = False
                raise GulpError(f"Element {k} not in library")
            gin += "species\n"
            gin += bpl.species_dict[k]
            gin += "buckingham\n"
            gin += bpl.pot_dict[k]
        else:
            # Oxygen in lewis has separate core and shell entries and a
            # spring term coupling them.
            gin += "species\n"
            k = "O_core"
            gin += bpl.species_dict[k]
            k = "O_shel"
            gin += bpl.species_dict[k]
            gin += "buckingham\n"
            gin += bpl.pot_dict[key]
            gin += "spring\n"
            gin += bpl.spring_dict[key]
    return gin
def tersoff_input(self, structure, periodic=False, uc=True, *keywords):
    """
    Assemble a GULP input with a Tersoff potential for an oxide structure.

    Args:
        structure: pymatgen.core.structure.Structure
        periodic (Default=False): whether periodic boundary conditions
            are used (controls cell and fractional-coordinate output).
        uc (Default=True): Unit Cell Flag; when True, symmetry lines are
            omitted from the structure section.
        keywords: GULP first line keywords.
    """
    # gin="static noelectrostatics \n "
    gin = self.keyword_line(*keywords)
    gin += self.structure_lines(
        structure,
        cell_flg=periodic,
        frac_flg=periodic,
        anion_shell_flg=False,
        cation_shell_flg=False,
        symm_flg=not uc,
    )
    return gin + self.tersoff_potential(structure)
@staticmethod
def tersoff_potential(structure):
    """
    Generate the species, tersoff potential lines for an oxide structure

    Args:
        structure: pymatgen.core.structure.Structure
    """
    # Assign valences via bond-valence analysis.
    bv = BVAnalyzer()
    el = [site.specie.symbol for site in structure]
    valences = bv.get_valences(structure)
    el_val_dict = dict(zip(el, valences))
    gin = "species \n"
    qerfstring = "qerfc\n"
    for key, value in el_val_dict.items():
        if key != "O" and value % 1 != 0:
            # Tersoff parameters are tabulated per integer oxidation state.
            raise SystemError("Oxide has mixed valence on metal")
        specie_string = key + " core " + str(value) + "\n"
        gin += specie_string
        qerfstring += key + " " + key + " 0.6000 10.0000 \n"
    gin += "# noelectrostatics \n Morse \n"
    met_oxi_ters = TersoffPotential().data
    for key, value in el_val_dict.items():
        if key != "O":
            # Table keys look like "Metal(OxiState)", e.g. "Fe(3)".
            metal = key + "(" + str(int(value)) + ")"
            ters_pot_str = met_oxi_ters[metal]
            gin += ters_pot_str
    gin += qerfstring
    return gin
@staticmethod
def get_energy(gout):
"""
Args:
gout ():
Returns:
Energy
"""
energy = None
for line in gout.split("\n"):
if "Total lattice energy" in line and "eV" in line:
energy = line.split()
elif "Non-primitive unit cell" in line and "eV" in line:
energy = line.split()
if energy:
return float(energy[4])
raise GulpError("Energy not found in Gulp output")
@staticmethod
def get_relaxed_structure(gout):
    """
    Parse GULP output and rebuild the relaxed structure.

    Args:
        gout (str): GULP output text.

    Returns:
        (Structure) relaxed structure.

    Raises:
        OSError: if no final coordinates are found in the output.
    """
    # Find the structure lines
    structure_lines = []
    cell_param_lines = []
    output_lines = gout.split("\n")
    no_lines = len(output_lines)
    i = 0
    # Compute the input lattice parameters
    while i < no_lines:
        line = output_lines[i]
        if "Full cell parameters" in line:
            # Values sit at fixed token positions 8 (length) and 11
            # (angle) on three consecutive lines -- layout-dependent.
            i += 2
            line = output_lines[i]
            a = float(line.split()[8])
            alpha = float(line.split()[11])
            line = output_lines[i + 1]
            b = float(line.split()[8])
            beta = float(line.split()[11])
            line = output_lines[i + 2]
            c = float(line.split()[8])
            gamma = float(line.split()[11])
            i += 3
            break
        if "Cell parameters" in line:
            # Alternative header; values at token positions 2 and 5.
            i += 2
            line = output_lines[i]
            a = float(line.split()[2])
            alpha = float(line.split()[5])
            line = output_lines[i + 1]
            b = float(line.split()[2])
            beta = float(line.split()[5])
            line = output_lines[i + 2]
            c = float(line.split()[2])
            gamma = float(line.split()[5])
            i += 3
            break
        i += 1
    while i < no_lines:
        line = output_lines[i]
        if "Final fractional coordinates of atoms" in line:
            # read the site coordinates in the following lines
            i += 6
            line = output_lines[i]
            while line[0:2] != "--":
                structure_lines.append(line)
                i += 1
                line = output_lines[i]
            # read the cell parameters
            i += 9
            line = output_lines[i]
            if "Final cell parameters" in line:
                i += 3
                for del_i in range(6):
                    line = output_lines[i + del_i]
                    cell_param_lines.append(line)
            break
        i += 1
    # Process the structure lines
    if structure_lines:
        sp = []
        coords = []
        for line in structure_lines:
            fields = line.split()
            if fields[2] == "c":
                # Only core ("c") rows define sites; shell rows are skipped.
                sp.append(fields[1])
                coords.append(list(float(x) for x in fields[3:6]))
    else:
        raise OSError("No structure found")
    if cell_param_lines:
        # Relaxed lattice parameters override the input ones parsed above.
        a = float(cell_param_lines[0].split()[1])
        b = float(cell_param_lines[1].split()[1])
        c = float(cell_param_lines[2].split()[1])
        alpha = float(cell_param_lines[3].split()[1])
        beta = float(cell_param_lines[4].split()[1])
        gamma = float(cell_param_lines[5].split()[1])
    latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
    return Structure(latt, sp, coords)
class GulpCaller:
    """
    Class to run gulp from commandline
    """

    def __init__(self, cmd="gulp"):
        """
        Initialize with the executable if not in the standard path

        Args:
            cmd: Command. Defaults to gulp.

        Raises:
            GulpError: if no executable can be located.
        """

        def is_exe(f):
            # True if f exists and is executable by the current user.
            return os.path.isfile(f) and os.access(f, os.X_OK)

        fpath, fname = os.path.split(cmd)
        if fpath:
            # Explicit path given: accept it only if executable.
            if is_exe(cmd):
                self._gulp_cmd = cmd
                return
        else:
            # Bare command name: search every PATH entry.
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                file = os.path.join(path, cmd)
                if is_exe(file):
                    self._gulp_cmd = file
                    return
        raise GulpError("Executable not found")

    def run(self, gin):
        """
        Run GULP using the gin as input

        Args:
            gin: GULP input string

        Returns:
            gout: GULP output string

        Raises:
            GulpError: if GULP reports an error on stderr or stdout.
            GulpConvergenceError: if optimisation did not converge.
        """
        # Execute in a scratch directory so GULP's temp files are cleaned up.
        with ScratchDir("."):
            with subprocess.Popen(
                self._gulp_cmd,
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE,
            ) as p:
                # GULP reads its input from stdin.
                out, err = p.communicate(bytearray(gin, "utf-8"))
                out = out.decode("utf-8")
                err = err.decode("utf-8")
                if "Error" in err or "error" in err:
                    print(gin)
                    print("----output_0---------")
                    print(out)
                    print("----End of output_0------\n\n\n")
                    print("----output_1--------")
                    # NOTE(review): this prints `out` a second time under the
                    # "output_1" label; presumably `err` was intended -- confirm
                    # before changing.
                    print(out)
                    print("----End of output_1------")
                    raise GulpError(err)
                # We may not need this
                if "ERROR" in out:
                    raise GulpError(out)
                # Sometimes optimisation may fail to reach convergence
                conv_err_string = "Conditions for a minimum have not been satisfied"
                if conv_err_string in out:
                    raise GulpConvergenceError()
                # Rebuilt output is `out` plus one trailing newline.
                gout = ""
                for line in out.split("\n"):
                    gout = gout + line + "\n"
                return gout
def get_energy_tersoff(structure, gulp_cmd="gulp"):
    """
    Compute the energy of a structure using Tersoff potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
    """
    io = GulpIO()
    caller = GulpCaller(gulp_cmd)
    return io.get_energy(caller.run(io.tersoff_input(structure)))
def get_energy_buckingham(structure, gulp_cmd="gulp", keywords=("optimise", "conp", "qok"), valence_dict=None):
    """
    Compute the energy of a structure using Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
        keywords: GULP first line keywords
        valence_dict: {El: valence}. Needed if the structure is not charge
            neutral.
    """
    io = GulpIO()
    caller = GulpCaller(gulp_cmd)
    gout = caller.run(io.buckingham_input(structure, keywords, valence_dict=valence_dict))
    return io.get_energy(gout)
def get_energy_relax_structure_buckingham(structure, gulp_cmd="gulp", keywords=("optimise", "conp"), valence_dict=None):
    """
    Relax a structure and compute the energy using Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
        keywords: GULP first line keywords
        valence_dict: {El: valence}. Needed if the structure is not charge
            neutral.

    Returns:
        tuple: (energy, relaxed Structure)
    """
    io = GulpIO()
    caller = GulpCaller(gulp_cmd)
    gout = caller.run(io.buckingham_input(structure, keywords, valence_dict=valence_dict))
    return io.get_energy(gout), io.get_relaxed_structure(gout)
class GulpError(Exception):
    """Raised when GULP reports an error in its output or on stderr."""

    def __init__(self, msg):
        """
        Args:
            msg (str): Error message from GULP.
        """
        self.msg = msg

    def __str__(self):
        return f"GulpError : {self.msg}"
class GulpConvergenceError(Exception):
    """
    Raised when proper convergence is not reached in GULP's
    Mott-Littleton defect energy optimisation procedure.
    """

    def __init__(self, msg=""):
        """
        Args:
            msg (str): Optional message describing the convergence failure.
        """
        self.msg = msg

    def __str__(self):
        return self.msg
class BuckinghamPotential:
    """
    Generate the Buckingham Potential Table from the bush.lib and lewis.lib.

    Ref:
        T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle, J. Mater Chem.,
        4, 831-837 (1994).
        G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys., 18,
        1149-1161 (1985)
    """

    def __init__(self, bush_lewis_flag):
        """
        Args:
            bush_lewis_flag (str): Flag for using Bush or Lewis potential.
        """
        assert bush_lewis_flag in {"bush", "lewis"}
        pot_file = "bush.lib" if bush_lewis_flag == "bush" else "lewis.lib"
        # Library files are located via the GULP_LIB environment variable.
        with open(os.path.join(os.environ["GULP_LIB"], pot_file)) as f:
            # In lewis.lib there is no shell for cation
            species_dict, pot_dict, spring_dict = {}, {}, {}
            # Section flags: which library-file block ("species",
            # "buckingham", "spring") we are currently parsing.
            sp_flg, pot_flg, spring_flg = False, False, False
            for row in f:
                if row[0] == "#":
                    # Comment line in the library file.
                    continue
                if row.split()[0] == "species":
                    sp_flg, pot_flg, spring_flg = True, False, False
                    continue
                if row.split()[0] == "buckingham":
                    sp_flg, pot_flg, spring_flg = False, True, False
                    continue
                if row.split()[0] == "spring":
                    sp_flg, pot_flg, spring_flg = False, False, True
                    continue
                elmnt = row.split()[0]
                if sp_flg:
                    if bush_lewis_flag == "bush":
                        # Accumulate raw lines and total the charge
                        # (column 2) across the element's core/shell rows.
                        if elmnt not in species_dict.keys():
                            species_dict[elmnt] = {"inp_str": "", "oxi": 0}
                        species_dict[elmnt]["inp_str"] += row
                        species_dict[elmnt]["oxi"] += float(row.split()[2])
                    elif bush_lewis_flag == "lewis":
                        if elmnt == "O":
                            # Oxygen has separate core and shell entries.
                            if row.split()[1] == "core":
                                species_dict["O_core"] = row
                            if row.split()[1] == "shel":
                                species_dict["O_shel"] = row
                        else:
                            # Keys look like "Metal_OxiState+"; store a
                            # normalized "<Metal> core <charge>" line.
                            metal = elmnt.split("_")[0]
                            # oxi_state = metaloxi.split('_')[1][0]
                            species_dict[elmnt] = metal + " core " + row.split()[2] + "\n"
                    continue
                if pot_flg:
                    if bush_lewis_flag == "bush":
                        pot_dict[elmnt] = row
                    elif bush_lewis_flag == "lewis":
                        if elmnt == "O":
                            pot_dict["O"] = row
                        else:
                            metal = elmnt.split("_")[0]
                            # oxi_state = metaloxi.split('_')[1][0]
                            pot_dict[elmnt] = metal + " " + " ".join(row.split()[1:]) + "\n"
                    continue
                if spring_flg:
                    spring_dict[elmnt] = row
            if bush_lewis_flag == "bush":
                # Fill the null keys in spring dict with empty strings
                for key in pot_dict.keys():
                    if key not in spring_dict.keys():
                        spring_dict[key] = ""
            self.species_dict = species_dict
            self.pot_dict = pot_dict
            self.spring_dict = spring_dict
class TersoffPotential:
    """Tersoff potential table loaded from the "OxideTersoffPotentials" data file."""

    def __init__(self):
        """Parse the data file shipped next to this module into ``self.data``."""
        module_dir = os.path.dirname(os.path.abspath(__file__))
        table = {}
        with open(os.path.join(module_dir, "OxideTersoffPotentials")) as datafile:
            for row in datafile:
                # First token is the "Metal(OxiState)" key; the payload is
                # everything after the first ")".
                metaloxi = row.split()[0]
                table[metaloxi] = row.split(")")[1]
        self.data = table
|
materialsproject/pymatgen
|
pymatgen/command_line/gulp_caller.py
|
Python
|
mit
| 27,411
|
[
"GULP",
"pymatgen"
] |
fb1929df8f39415bb9b1a2ccd19d5d95ad5313a2ea86329b643db0a95efc0f07
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio-Client.
# Copyright (C) 2014 CERN.
#
# Invenio-Client is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio-Client is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""CERN Document Server specific connector."""
import splinter
from invenio_client import InvenioConnector
class CDSInvenioConnector(InvenioConnector):
    # Base URL of the CERN Document Server.
    __url__ = "http://cds.cern.ch/"

    def __init__(self, user="", password=""):
        """Use to connect to the CERN Document Server (CDS).

        .. note:: It uses centralized SSO for authentication.
        """
        cds_url = self.__url__
        if user:
            # Authenticated sessions must go over HTTPS.
            cds_url = cds_url.replace('http', 'https')
        super(CDSInvenioConnector, self).__init__(
            cds_url, user, password)

    def _init_browser(self):
        """Update this everytime the CERN SSO login form is refactored."""
        self.browser = splinter.Browser('phantomjs')
        self.browser.visit(self.server_url)
        self.browser.find_link_by_partial_text("Sign in").click()
        self.browser.fill(
            'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$'
            'txtFormsLogin', self.user)
        self.browser.fill(
            'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$'
            'txtFormsPassword', self.password)
        # NOTE(review): submit is clicked twice -- presumably the SSO flow has
        # an intermediate page that also needs submitting; confirm before
        # removing either click.
        self.browser.find_by_css('input[type=submit]').click()
        self.browser.find_by_css('input[type=submit]').click()
__all__ = ('CDSInvenioConnector', )
|
inveniosoftware/invenio-client
|
invenio_client/contrib/cds.py
|
Python
|
gpl-2.0
| 2,282
|
[
"VisIt"
] |
e611714d7caad13241acbefa196ff5c1fffdc55b78b4d879cf8f3ff0a3318807
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Heyo
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Ansible module metadata: schema version, maturity status, and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_nssm
version_added: "2.0"
short_description: NSSM - the Non-Sucking Service Manager
description:
- nssm is a service helper which doesn't suck. See U(https://nssm.cc/) for more information.
requirements:
- "nssm >= 2.24.0 # (install via M(win_chocolatey)) C(win_chocolatey: name=nssm)"
options:
name:
description:
- Name of the service to operate on
required: true
state:
description:
- State of the service on the system
- Note that NSSM actions like "pause", "continue", "rotate" do not fit the declarative style of ansible, so these should be implemented via the
ansible command module
choices:
- present
- started
- stopped
- restarted
- absent
default: started
application:
description:
- The application binary to run as a service
- "Specify this whenever the service may need to be installed (state: present, started, stopped, restarted)"
- "Note that the application name must look like the following, if the directory includes spaces:"
- 'nssm install service "c:\\Program Files\\app.exe\\" "C:\\Path with spaces\\"'
- >
See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info:
U(https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)
stdout_file:
description:
- Path to receive output
stderr_file:
description:
- Path to receive error output
app_parameters:
description:
- Parameters to be passed to the application when it starts.
- Use either this or C(app_parameters_free_form), not both
app_parameters_free_form:
version_added: "2.3.0"
description:
- Single string of parameters to be passed to the service.
- Use either this or C(app_parameters), not both
dependencies:
description:
- Service dependencies that has to be started to trigger startup, separated by comma.
user:
description:
- User to be used for service startup
password:
description:
- Password to be used for service startup
start_mode:
description:
- If C(auto) is selected, the service will start at bootup.
- C(delayed) causes a delayed but automatic start after boot (added in version 2.5).
- C(manual) means that the service will start only when another service needs it.
- C(disabled) means that the service will stay off, regardless if it is needed or not.
default: auto
choices:
- auto
- delayed
- manual
- disabled
author:
- "Adam Keech (@smadam813)"
- "George Frank (@georgefrank)"
- "Hans-Joachim Kliemeck (@h0nIg)"
- "Michael Wild (@themiwi)"
'''
EXAMPLES = r'''
# Install and start the foo service
- win_nssm:
name: foo
application: C:\windows\foo.exe
# Install and start the foo service with a key-value pair argument
# This will yield the following command: C:\windows\foo.exe bar "true"
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
bar: true
# Install and start the foo service with a key-value pair argument, where the argument needs to start with a dash
# This will yield the following command: C:\windows\\foo.exe -bar "true"
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
"-bar": true
# Install and start the foo service with a single parameter
# This will yield the following command: C:\windows\\foo.exe bar
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
_: bar
# Install and start the foo service with a mix of single params, and key value pairs
# This will yield the following command: C:\windows\\foo.exe bar -file output.bat
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
_: bar
"-file": "output.bat"
# Use the single line parameters option to specify an arbitrary string of parameters
# for the service executable
- name: Make sure the Consul service runs
win_nssm:
name: consul
application: C:\consul\consul.exe
app_parameters_free_form: agent -config-dir=C:\consul\config
stdout_file: C:\consul\log.txt
stderr_file: C:\consul\error.txt
# Install and start the foo service, redirecting stdout and stderr to the same file
- win_nssm:
name: foo
application: C:\windows\foo.exe
stdout_file: C:\windows\foo.log
stderr_file: C:\windows\foo.log
# Install and start the foo service, but wait for dependencies tcpip and adf
- win_nssm:
name: foo
application: C:\windows\foo.exe
dependencies: 'adf,tcpip'
# Install and start the foo service with dedicated user
- win_nssm:
name: foo
application: C:\windows\foo.exe
user: foouser
password: secret
# Install the foo service but do not start it automatically
- win_nssm:
name: foo
application: C:\windows\foo.exe
state: present
start_mode: manual
# Remove the foo service
- win_nssm:
name: foo
state: absent
'''
|
ravibhure/ansible
|
lib/ansible/modules/windows/win_nssm.py
|
Python
|
gpl-3.0
| 6,003
|
[
"ADF"
] |
fa0ce13e77f980b02c051f4a25116f54bd8011c9d9541ab534b95070bf051589
|
"""edc_pharmacy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from edc_base.views import LoginView, LogoutView
from .admin_site import ba_namotswe_admin, ba_namotswe_historical_admin
from edc_metadata.admin import edc_metadata_admin
from edc_registration.admin import edc_registration_admin
from edc_consent.views import HomeView as EdcConsentHomeView
from edc_consent.admin_site import edc_consent_admin
from edc_identifier.admin_site import edc_identifier_admin
from .views import HomeView
# URL routes: app dashboard, auth views, admin sites, and EDC module URLs.
urlpatterns = [
    url(r'^dashboard/', include('ba_namotswe_dashboard.urls')),
    url(r'login', LoginView.as_view(), name='login_url'),
    url(r'logout', LogoutView.as_view(pattern_name='login_url'), name='logout_url'),
    # Several admin sites share the /admin/ prefix; declaration order matters.
    url(r'^admin/', ba_namotswe_admin.urls),
    url(r'^admin/', ba_namotswe_historical_admin.urls),
    url(r'^admin/', admin.site.urls),
    url(r'^admin/password_reset/$', auth_views.password_reset, name='admin_password_reset'),
    url(r'^admin/password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^edc_metadata/', edc_metadata_admin.urls),
    url(r'^edc_registration/', edc_registration_admin.urls),
    url(r'^edc_consent/admin/', edc_consent_admin.urls),
    url(r'^edc_visit_schedule/', include('edc_visit_schedule.urls', namespace='edc-visit-schedule')),
    url(r'^edc_consent/', EdcConsentHomeView.as_view(), name='edc-consent-url'),
    url(r'^edc_identifier/', edc_identifier_admin.urls),
    url(r'^edc/', include('edc_base.urls', namespace='edc-base')),
    # Subject view keyed by a 14-character subject identifier.
    url(r'^(?P<subject_identifier>[0-9\-]{14})/$', HomeView.as_view(), name='home_url'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
    # Catch-all: site home page.
    url(r'^', HomeView.as_view(), name='home_url'),
]
|
botswana-harvard/ba-namotswe
|
ba_namotswe/urls.py
|
Python
|
gpl-3.0
| 2,610
|
[
"VisIt"
] |
43ad7fd303f330d04daa4f0673baf10af7d520f8bfd57356d3476d4864df05d7
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGoseq(RPackage):
    """Gene Ontology analyser for RNA-seq and other length biased data.

    Detects Gene Ontology and/or other user defined categories which are
    over/under represented in RNA-seq data"""

    homepage = "https://bioconductor.org/packages/goseq"
    git = "https://git.bioconductor.org/packages/goseq.git"

    # Each release is pinned to a Bioconductor git commit.
    version('1.36.0', commit='26c9f7de18889afeee1b571ca1c4ab4d2877ab80')
    version('1.34.1', commit='bad217b42cc34423698fbcf701d4e3591aac4474')
    version('1.32.0', commit='32fcbe647eea17d7d0d7a262610811502c421d36')
    version('1.30.0', commit='fa8cafe0766ed0b6a97a4ed3374a709ed9d1daf1')
    version('1.28.0', commit='ed0ce332a8972618d740d8a93711dff994657738')

    # R runtime plus CRAN/Bioconductor package dependencies.
    depends_on('r@2.11.0:', type=('build', 'run'))
    depends_on('r-biasedurn', type=('build', 'run'))
    depends_on('r-genelendatabase@1.9.2:', type=('build', 'run'))
    depends_on('r-mgcv', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-go-db', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-goseq/package.py
|
Python
|
lgpl-2.1
| 1,312
|
[
"Bioconductor"
] |
3912ce7c1530b8c0b97ca7cc87f8b3bb85bc2fd4ccf041e3f12fb63a7d7b170b
|
from rope.base import libutils
from rope.base import pyobjects, exceptions, stdmods
from rope.refactor import occurrences
from rope.refactor.importutils import importinfo
class ImportInfoVisitor(object):
    """Dispatch on the concrete class of an import statement's ``import_info``.

    Subclasses override the ``visit*`` hooks; ``dispatch`` routes each
    statement to ``visit<ImportInfoClassName>``.
    """

    def dispatch(self, import_):
        try:
            handler = getattr(self, "visit" + type(import_.import_info).__name__)
            return handler(import_, import_.import_info)
        except exceptions.ModuleNotFoundError:
            # Unresolvable modules are silently skipped.
            pass

    def visitEmptyImport(self, import_stmt, import_info):
        """Hook for empty imports; default does nothing."""
        pass

    def visitNormalImport(self, import_stmt, import_info):
        """Hook for ``import x`` statements; default does nothing."""
        pass

    def visitFromImport(self, import_stmt, import_info):
        """Hook for ``from x import y`` statements; default does nothing."""
        pass
class RelativeToAbsoluteVisitor(ImportInfoVisitor):
    """Rewrite relative imports as absolute ones where a module resolves."""

    def __init__(self, project, current_folder):
        self.to_be_absolute = []
        self.project = project
        self.folder = current_folder
        self.context = importinfo.ImportContext(project, current_folder)

    def visitNormalImport(self, import_stmt, import_info):
        """Replace each resolvable name in ``import x`` with its absolute name."""
        self.to_be_absolute.extend(self._get_relative_to_absolute_list(import_info))
        renamed = []
        for name, alias in import_info.names_and_aliases:
            module = self.project.find_module(name, folder=self.folder)
            if module is None:
                # Unresolvable names are kept untouched.
                renamed.append((name, alias))
            else:
                renamed.append((libutils.modname(module), alias))
        if not import_info._are_name_and_alias_lists_equal(
            renamed, import_info.names_and_aliases
        ):
            import_stmt.import_info = importinfo.NormalImport(renamed)

    def _get_relative_to_absolute_list(self, import_info):
        # Collect (relative, absolute) pairs for unaliased, resolvable names
        # whose absolute module name differs from the written one.
        pairs = []
        for name, alias in import_info.names_and_aliases:
            if alias is not None:
                continue
            module = self.project.find_module(name, folder=self.folder)
            if module is None:
                continue
            absolute = libutils.modname(module)
            if absolute != name:
                pairs.append((name, absolute))
        return pairs

    def visitFromImport(self, import_stmt, import_info):
        """Rewrite the source module of ``from x import ...`` to its absolute name."""
        module = import_info.get_imported_resource(self.context)
        if module is None:
            return None
        absolute = libutils.modname(module)
        if import_info.module_name != absolute:
            import_stmt.import_info = importinfo.FromImport(
                absolute, 0, import_info.names_and_aliases
            )
class FilteringVisitor(ImportInfoVisitor):
    """Build filtered copies of imports, keeping only names accepted by ``can_select``."""

    def __init__(self, project, folder, can_select):
        self.to_be_absolute = []
        self.project = project
        self.can_select = self._transform_can_select(can_select)
        self.context = importinfo.ImportContext(project, folder)

    def _transform_can_select(self, can_select):
        # Adapt a single-name predicate to (name, alias) pairs: the alias,
        # when present, is the name actually bound in the module.
        def predicate(name, alias):
            return can_select(alias if alias is not None else name)

        return predicate

    def visitNormalImport(self, import_stmt, import_info):
        kept = [
            (name, alias)
            for name, alias in import_info.names_and_aliases
            if self.can_select(name, alias)
        ]
        return importinfo.NormalImport(kept)

    def visitFromImport(self, import_stmt, import_info):
        if _is_future(import_info):
            # __future__ imports must never be filtered away.
            return import_info
        kept = []
        if import_info.is_star_import():
            # Keep the star if any name it provides is selected.
            for name in import_info.get_imported_names(self.context):
                if self.can_select(name, None):
                    kept.append(import_info.names_and_aliases[0])
                    break
        else:
            kept = [
                (name, alias)
                for name, alias in import_info.names_and_aliases
                if self.can_select(name, alias)
            ]
        return importinfo.FromImport(import_info.module_name, import_info.level, kept)
class RemovingVisitor(ImportInfoVisitor):
    """Replace each import's info with its filtered form, dropping unselected names."""

    def __init__(self, project, folder, can_select):
        self.to_be_absolute = []
        self.project = project
        self.filtering = FilteringVisitor(project, folder, can_select)

    def dispatch(self, import_):
        filtered = self.filtering.dispatch(import_)
        if filtered is not None:
            import_.import_info = filtered
class AddingVisitor(ImportInfoVisitor):
    """A class for adding imports

    Given a list of `ImportInfo`, it tries to add each import to the
    module and returns `True` and gives up when an import can be added
    to older ones.
    """

    def __init__(self, project, import_list):
        self.project = project
        self.import_list = import_list
        self.import_info = None

    def dispatch(self, import_):
        # Try each pending import against this statement; stop at the first
        # one that can be merged into it.
        for import_info in self.import_list:
            self.import_info = import_info
            if ImportInfoVisitor.dispatch(self, import_):
                return True

    # TODO: Handle adding relative and absolute imports
    def visitNormalImport(self, import_stmt, import_info):
        if not isinstance(self.import_info, import_info.__class__):
            return False
        # Adding ``import x`` and ``import x.y`` that results ``import x.y``
        if (
            len(import_info.names_and_aliases)
            == len(self.import_info.names_and_aliases)
            == 1
        ):
            imported1 = import_info.names_and_aliases[0]
            imported2 = self.import_info.names_and_aliases[0]
            # Only merge dotted prefixes when neither import is aliased.
            if imported1[1] == imported2[1] is None:
                if imported1[0].startswith(imported2[0] + "."):
                    # Existing import is already more specific; done.
                    return True
                if imported2[0].startswith(imported1[0] + "."):
                    # New import is more specific; replace the old one.
                    import_stmt.import_info = self.import_info
                    return True
        # Multiple imports using a single import statement is discouraged
        # so we won't bother adding them.
        if self.import_info._are_name_and_alias_lists_equal(
            import_info.names_and_aliases, self.import_info.names_and_aliases
        ):
            return True

    def visitFromImport(self, import_stmt, import_info):
        if (
            isinstance(self.import_info, import_info.__class__)
            and import_info.module_name == self.import_info.module_name
            and import_info.level == self.import_info.level
        ):
            if import_info.is_star_import():
                # An existing star import already provides everything.
                return True
            if self.import_info.is_star_import():
                import_stmt.import_info = self.import_info
                return True
            if self.project.prefs.get("split_imports"):
                # One name per statement policy: only exact duplicates merge.
                return (
                    self.import_info.names_and_aliases == import_info.names_and_aliases
                )
            # Otherwise merge the new names into the existing from-import.
            new_pairs = list(import_info.names_and_aliases)
            for pair in self.import_info.names_and_aliases:
                if pair not in new_pairs:
                    new_pairs.append(pair)
            import_stmt.import_info = importinfo.FromImport(
                import_info.module_name, import_info.level, new_pairs
            )
            return True
class ExpandStarsVisitor(ImportInfoVisitor):
    """Replace ``from x import *`` statements with explicit name lists."""

    def __init__(self, project, folder, can_select):
        self.project = project
        self.filtering = FilteringVisitor(project, folder, can_select)
        self.context = importinfo.ImportContext(project, folder)

    def visitNormalImport(self, import_stmt, import_info):
        # Plain imports need no expansion; hand them to the filter as-is.
        self.filtering.dispatch(import_stmt)

    def visitFromImport(self, import_stmt, import_info):
        if not import_info.is_star_import():
            self.filtering.dispatch(import_stmt)
            return
        # Expand the star into one ``(name, None)`` pair per imported name,
        # then let the filtering visitor prune the expanded list.
        pairs = [(name, None) for name in import_info.get_imported_names(self.context)]
        expanded = importinfo.FromImport(
            import_info.module_name, import_info.level, pairs
        )
        import_stmt.import_info = self.filtering.visitFromImport(None, expanded)
class SelfImportVisitor(ImportInfoVisitor):
    """Drop import pairs in a module that refer back to the module itself."""

    def __init__(self, project, current_folder, resource):
        self.project = project
        self.folder = current_folder
        # The resource (module file) whose self-imports are being removed.
        self.resource = resource
        # Names that were imported from self; their uses need fixing later.
        self.to_be_fixed = set()
        # ``(alias, name)`` pairs whose aliased uses must be renamed.
        self.to_be_renamed = set()
        self.context = importinfo.ImportContext(project, current_folder)

    def visitNormalImport(self, import_stmt, import_info):
        new_pairs = []
        for name, alias in import_info.names_and_aliases:
            resource = self.project.find_module(name, folder=self.folder)
            if resource is not None and resource == self.resource:
                # ``import thismodule [as x]``: remember the visible name
                # and drop the pair.
                imported = name
                if alias is not None:
                    imported = alias
                self.to_be_fixed.add(imported)
            else:
                new_pairs.append((name, alias))
        if not import_info._are_name_and_alias_lists_equal(
            new_pairs, import_info.names_and_aliases
        ):
            # At least one pair was dropped; rewrite the statement.
            import_stmt.import_info = importinfo.NormalImport(new_pairs)

    def visitFromImport(self, import_stmt, import_info):
        resource = import_info.get_imported_resource(self.context)
        if resource is None:
            return
        if resource == self.resource:
            # ``from thismodule import ...``
            self._importing_names_from_self(import_info, import_stmt)
            return
        pymodule = self.project.get_pymodule(resource)
        new_pairs = []
        for name, alias in import_info.names_and_aliases:
            try:
                result = pymodule[name].get_object()
                if (
                    isinstance(result, pyobjects.PyModule)
                    and result.get_resource() == self.resource
                ):
                    # ``from package import thismodule [as x]``
                    imported = name
                    if alias is not None:
                        imported = alias
                    self.to_be_fixed.add(imported)
                else:
                    new_pairs.append((name, alias))
            except exceptions.AttributeNotFoundError:
                # Name cannot be resolved; keep the pair untouched.
                new_pairs.append((name, alias))
        if not import_info._are_name_and_alias_lists_equal(
            new_pairs, import_info.names_and_aliases
        ):
            import_stmt.import_info = importinfo.FromImport(
                import_info.module_name, import_info.level, new_pairs
            )

    def _importing_names_from_self(self, import_info, import_stmt):
        # The imported names are already defined in this module, so the
        # statement can be emptied; aliased names must additionally be
        # renamed back to their real names at their use sites.
        if not import_info.is_star_import():
            for name, alias in import_info.names_and_aliases:
                if alias is not None:
                    self.to_be_renamed.add((alias, name))
        import_stmt.empty_import()
class SortingVisitor(ImportInfoVisitor):
    """Classify import statements into future/standard/third-party/project sets."""

    def __init__(self, project, current_folder):
        self.project = project
        self.folder = current_folder
        self.context = importinfo.ImportContext(project, current_folder)
        # One bucket per import category, filled while visiting.
        self.standard = set()
        self.third_party = set()
        self.in_project = set()
        self.future = set()

    def visitNormalImport(self, import_stmt, import_info):
        pairs = import_info.names_and_aliases
        if pairs:
            # Classification is based on the first imported name only.
            first_name = pairs[0][0]
            module = self.project.find_module(first_name, folder=self.folder)
            self._check_imported_resource(import_stmt, module, first_name)

    def visitFromImport(self, import_stmt, import_info):
        module = import_info.get_imported_resource(self.context)
        self._check_imported_resource(import_stmt, module, import_info.module_name)

    def _check_imported_resource(self, import_stmt, resource, imported_name):
        info = import_stmt.import_info
        # Pick the bucket, then add once at the end.
        if resource is not None and resource.project == self.project:
            bucket = self.in_project
        elif _is_future(info):
            bucket = self.future
        elif imported_name.split(".")[0] in stdmods.standard_modules():
            bucket = self.standard
        else:
            bucket = self.third_party
        bucket.add(import_stmt)
class LongImportVisitor(ImportInfoVisitor):
    """Collect overly long ``import x.y.z`` names to rewrite as ``from`` imports."""

    def __init__(self, current_folder, project, maxdots, maxlength):
        self.current_folder = current_folder
        self.project = project
        self.maxdots = maxdots
        self.maxlength = maxlength
        # Dotted names whose use sites should be renamed.
        self.to_be_renamed = set()
        # Replacement ``from`` imports, one per long name found.
        self.new_imports = []

    def visitNormalImport(self, import_stmt, import_info):
        for name, alias in import_info.names_and_aliases:
            # Aliased imports are already short at their use sites.
            if alias is not None or not self._is_long(name):
                continue
            self.to_be_renamed.add(name)
            last_dot = name.rindex(".")
            prefix = name[:last_dot]
            tail = name[last_dot + 1 :]
            self.new_imports.append(
                importinfo.FromImport(prefix, 0, ((tail, None),))
            )

    def _is_long(self, name):
        too_many_dots = name.count(".") > self.maxdots
        too_long = "." in name and len(name) > self.maxlength
        return too_many_dots or too_long
class RemovePyNameVisitor(ImportInfoVisitor):
    """Rewrite ``from`` imports so they no longer import a given pyname."""

    def __init__(self, project, pymodule, pyname, folder):
        self.pymodule = pymodule
        # The definition whose import pairs should be removed.
        self.pyname = pyname
        self.context = importinfo.ImportContext(project, folder)

    def visitFromImport(self, import_stmt, import_info):
        new_pairs = []
        if not import_info.is_star_import():
            for name, alias in import_info.names_and_aliases:
                try:
                    pyname = self.pymodule[alias or name]
                    if occurrences.same_pyname(self.pyname, pyname):
                        # This pair imports the name being removed; drop it.
                        continue
                except exceptions.AttributeNotFoundError:
                    # Unresolvable names are kept as-is.
                    pass
                new_pairs.append((name, alias))
        # NOTE: for a star import the pair list stays empty here.
        return importinfo.FromImport(
            import_info.module_name, import_info.level, new_pairs
        )

    def dispatch(self, import_):
        # Unlike the base class, a non-None result from a visit method is
        # installed as the statement's new import info.
        result = ImportInfoVisitor.dispatch(self, import_)
        if result is not None:
            import_.import_info = result
def _is_future(info):
    """Return ``True`` when *info* is a ``from __future__ import ...``."""
    if not isinstance(info, importinfo.FromImport):
        return False
    return info.module_name == "__future__"
|
python-rope/rope
|
rope/refactor/importutils/actions.py
|
Python
|
lgpl-3.0
| 14,313
|
[
"VisIt"
] |
fb191a0022f642b9754b61c34b704285310300a74422aabdce21f355f5307053
|
#==============================================================================
# import packages
#==============================================================================
import numpy as np
import mayavi.mlab as mlab
from tvtk.api import tvtk
import csv
import math
from mayavi.filters.mask_points import MaskPoints
from mayavi.modules.glyph import Glyph
from mayavi.tools.sources import MGlyphSource
import timeit
import glob
from mayavi.modules.surface import Surface
import os.path
import time
#==============================================================================
# load tile dictionary
#==============================================================================
# Toggle between the two known data locations.
external_node = True
if external_node:
    foldername = r'D:\WS\\'
else:
    foldername = r'D:\rongen\Documents\External node data\\'
# Tile metadata: tag -> [left, bottom, right, top] bounding box.
tiledictinp = {}
# ``with`` closes the file even if parsing fails (the original handle
# was never closed).
with open(foldername + 'tiledict.csv') as tilefile:
    for key, val in csv.reader(tilefile):
        # key = 'VyyHxx', val holds '[left, bottom, right, top]' as text;
        # the slice strips the bracket characters before splitting
        # (assumes the stored format matches -- TODO confirm trailing chars).
        tiledictinp[key] = [float(i) for i in val[1:-2].split(',')]
#==============================================================================
# Load npoints per tile dictionary
#==============================================================================
# Point count per tile data set: tag -> number of points ('tag;count' lines).
npointsdict = {}
with open(r'D:\WS\NPYtiles\memory_maps\npoints_per_set.txt') as pointsfile:
    for line in pointsfile:
        fields = line.split(';')
        npointsdict[fields[0]] = int(fields[1])
#==============================================================================
# define class for tile
#==============================================================================
class Tile:
    """A single map tile: bounding box plus memory-mapped point-cloud data."""

    def __init__(self, tag, bounds, use_mask):
        # tag: tile identifier (key of the tile dictionary, e.g. 'VyyHxx').
        self.tag = tag
        # bounds = [left, bottom, right, top]
        self.left = bounds[0]
        self.right = bounds[2]
        self.top = bounds[3]
        self.bottom = bounds[1]
        self.center = [0.5*(bounds[0]+bounds[2]), 0.5*(bounds[1]+bounds[3])]
        # Corner coordinates, counter-clockwise from bottom-left.
        # (sic: attribute name 'cornors' is relied upon by Tilegrid)
        self.cornors = np.array([[self.left, self.bottom],
                                 [self.right, self.bottom],
                                 [self.right, self.top ],
                                 [self.left, self.top ]])
        self.use_mask = use_mask

    def __repr__(self):
        return 'Tile '+self.tag+' from (x0,y0) = ('+'%.0f'%self.left + \
               ',' + '%.0f'%self.bottom + ')' + ' to (x1,y1) = (' + \
               '%.0f'%self.right + ',' + '%.0f'%self.top + ')'

    def mmap_data(self):
        # Memory-map the coordinates (float32 Nx3) and colors (uint8 Nx3)
        # so only the slices actually used are read from disk.
        self.xyz = np.memmap(foldername+'NPYtiles\memory_maps\\'+self.tag+'xyz.dat', dtype = 'float32', shape = tuple((npointsdict[self.tag],3)))
        self.colors = np.memmap(foldername+'NPYtiles\memory_maps\\'+self.tag+'col.dat', dtype = 'uint8', shape = tuple((npointsdict[self.tag],3)))
        ## Old method commented below. Takes slightly more time.
#        self.xyz = np.load(foldername+'NPYtiles\LEVEL0\\'+self.tag+'xyz.npy', mmap_mode = 'r')
#        self.colors = np.load(foldername+'NPYtiles\LEVEL0\\'+self.tag+'col.npy', mmap_mode = 'r')
        # Five detail levels: level i keeps the first 1/10**i of the points.
        self.points_in_set = np.empty(5, dtype = np.uint32)
        self.slices = []
        for i in range(5):
            self.points_in_set[i] = len(self.xyz) / 10**i
            self.slices.append(slice(self.points_in_set[i]))

    def get_level(self, npoints):
        # Pick the coarsest detail level that still holds more points than
        # requested (the mask trims the excess); full detail (0) when the
        # request exceeds the complete set.
        if npoints < self.points_in_set[0]:
            lvl = np.nonzero(npoints < self.points_in_set)[0][-1]
        else:
            lvl = 0
        return lvl

    def load_data_to_screen(self, engine, fig, npoints):
        '''
        Load data with a certain step to the container list.
        Parameters
        ----------
        step : The step size with which datapoints are skipped.
        '''
        self.lvl = self.get_level(npoints)
        slc = self.slices[self.lvl]
        # Create a glpyh data source, set data to is, add it to the
        # pipeline and create the mlab_source with which properties can be
        # changed.
        if len(self.xyz[slc]) > 0:
            data_source = MGlyphSource()
            data_source.reset(x = self.xyz[slc, 0], y = self.xyz[slc, 1], z = self.xyz[slc, 2])
            # The dataset is named after the tile tag so it can be found
            # (and removed) again via scene.children.
            vtk_data_source = mlab.pipeline.add_dataset(data_source.dataset, name = self.tag)
            data_source.m_data = vtk_data_source
            # Attach the per-point RGB colors as unsigned-char scalars.
            col = tvtk.UnsignedCharArray()
            col.from_array(self.colors[slc])
            vtk_data_source.mlab_source.dataset.point_data.scalars=col
            vtk_data_source.mlab_source.dataset.modified()
            if self.use_mask:
                # Create mask, add it to the engine and set its properties
                self.mask = MaskPoints()
                engine.add_filter(self.mask, vtk_data_source)
                self.mask.filter.random_mode = False
                self.mask.filter.maximum_number_of_points = npoints
            # Create a glyph to visualize the points, add it to the engine and
            # set its properties
            glyph = Glyph()
            if self.use_mask:
                engine.add_filter(glyph, self.mask)
            else:
                engine.add_filter(glyph, vtk_data_source)
            # Render each point as a bare vertex (fastest glyph type).
            glyph.glyph.glyph_source.glyph_source.glyph_type = 'vertex'
            glyph.name = self.tag
#            print 'Npoints =', self.mask.filter.maximum_number_of_points
#==============================================================================
# class for tilegrid
#==============================================================================
class Tilegrid:
    """Container for all tiles; distributes the point budget over them."""

    def __init__(self, dictionary, use_mask):
        # dictionary: tag -> [left, bottom, right, top] (see tiledictinp).
        self.dictionary = dictionary
        self.tiles = []
        self.tags = []
        self.corners = []
        self.centers = []
        self.npoints = []
        self.vis_points = np.zeros(len(dictionary))
        self.vis_points_old = np.zeros(len(dictionary))
        # Get x range and y range for tiles
        xr = []
        yr = []
        # Append all x and y coordinates from the dictionary
        for i in self.dictionary.values():
            xr.append(i[0])
            xr.append(i[2])
            yr.append(i[1])
            yr.append(i[3])
        # Sort out double values
        self.xr = np.unique(xr)
        self.yr = np.unique(np.array(yr).round(3))
        # Add tile objects to self.tiles
        # NOTE(review): indexing ``dict.keys()[i]`` is Python 2 only and
        # assumes keys() and values() iterate in the same order (they do,
        # between mutations).
        remove_entries = []
        for i in range(len(dictionary)):
            # Only tiles with a memory-mapped data file on disk are kept.
            if os.path.isfile(r'D:\WS\NPYtiles\memory_maps\{}xyz.dat'.format(self.dictionary.keys()[i])):
                print 'Processing tile:',self.dictionary.keys()[i]
                self.tiles.append(Tile(self.dictionary.keys()[i],
                                       self.dictionary.values()[i], use_mask))
                self.tiles[-1].mmap_data()
                self.tags.append(self.dictionary.keys()[i])
                self.corners.append(self.tiles[-1].cornors)
                self.centers.append(self.tiles[-1].center)
                self.npoints.append(int(len(self.tiles[-1].xyz)))
            else:
                remove_entries.append(i)
        # Drop dictionary entries that had no data file.
        newdict = dict(self.dictionary)
        for i in remove_entries:
            del newdict[self.dictionary.keys()[i]]
        self.dictionary = newdict
        # Make one big [x,y] array from the corners
        self.corners = np.vstack(self.corners)
        self.centers = np.array(self.centers)
        self.npoints = np.array(self.npoints)

    def update_proportions(self, camera_position, in_focal_range, total):
        '''
        Tilegrid method which calculates the distance between the camera and
        each center of a tile.
        Assumed is that the center of each tile has a z-coordinate of 0 m.
        '''
        # Boolean mask of tiles whose tag is in the focal range.
        index = np.in1d(self.tags, list(in_focal_range))
        dx = self.centers[index, 0] - camera_position[0]
        dy = self.centers[index, 1] - camera_position[1]
        dz = camera_position[2]
        dist = (dx**2 + dy**2 + dz**2)**0.5
        # Elevation angle from the tile center up to the camera.
        alpha = np.arctan(dz/(dx**2+dy**2)**0.5)
        # Weight tiles by inverse squared distance, tile size (point count)
        # and viewing angle, then normalize to fractions of the budget.
        proportion = dist**-2 * self.npoints[index] * np.sin(alpha)
        proportion = proportion / sum(proportion)
        self.number_of_visible_points = np.zeros(len(self.tiles), dtype = np.uint32)
        # Each tile gets its share of ``total``, capped at its point count.
        self.number_of_visible_points[index] = np.minimum((proportion * total), self.npoints[index]).astype(np.uint32)
#==============================================================================
# Class for model results
#==============================================================================
class ModelResults:
    """Load and display time-stepped model output grids (erosion/water level)."""

    def __init__(self, file_locs, engine, scene):
        self.engine = engine
        self.scene = scene
        # XML grid files for the primary (erosion) result series.
        self.file_locs = file_locs
        # Current frame index into the file lists.
        self.count = 0
        # Matching water-level grids; assumed parallel to file_locs.
        self.file_locs2 = glob.glob(foldername+'Grids\wlev\*.xml')
        self.change_data()

    def change_data(self, frame = None):
        """Show the grid surfaces for a frame.

        ``frame`` may be 'forward', 'back', an explicit index, or ``None``
        (redisplay the current frame). Returns the current frame counter.
        """
        cam, foc = mlab.move()
        mlab.view(focalpoint = np.array([foc[0], 211457, 0]))
        # Remove previously added result surfaces. Repeated 5 times,
        # presumably because ``remove()`` mutates ``scene.children`` while
        # iterating -- TODO confirm.
        for _ in range(5):
            for i in self.scene.children:
                if i.name == 'gridsurface' or i.name == 'gridsurface2' or i.name == 'waterlevel':
                    i.remove()
        if frame == 'forward':
            self.count = (self.count + 1) % len(self.file_locs)
            frame = np.copy(self.count)
        elif frame == 'back':
            # NOTE(review): no wrap-around here; relies on Python's negative
            # indexing when the count goes below zero.
            self.count = (self.count - 1)
            frame = np.copy(self.count)
        elif frame != None:
            # Explicit frame index: still advances the internal counter.
            self.count = (self.count + 1) % len(self.file_locs)
        else:
            frame = np.copy(self.count)
        # Read and display the erosion grid for this frame.
        read = tvtk.XMLUnstructuredGridReader(file_name = self.file_locs[frame])
        self.grid = read.get_output()
        mlab.pipeline.add_dataset(self.grid, 'gridsurface')
        self.engine.add_module(Surface())
        # Title shows the simulated clock time: 2 frames per hour,
        # 30 minutes per frame.
        mlab.title('{:02d}:{:02d}'.format(int(np.floor(frame/2)%24), int(frame%2 * 30)), size = 0.5)
        for i in self.scene.children:
            if i.name == 'gridsurface':
                surface = i.children[0].children[0]
                module_manager = i.children[0]
                # Style the scalar bar (font, colors, position, labels).
                module_manager.scalar_lut_manager.label_text_property.font_family = 'times'
                module_manager.scalar_lut_manager.label_text_property.italic = False
                module_manager.scalar_lut_manager.label_text_property.color = (0.0, 0.0, 0.0)
                module_manager.scalar_lut_manager.label_text_property.font_size = 8
                module_manager.scalar_lut_manager.title_text_property.font_family = 'times'
                module_manager.scalar_lut_manager.title_text_property.italic = False
                module_manager.scalar_lut_manager.title_text_property.color = (0.0, 0.0, 0.0)
                module_manager.scalar_lut_manager.scalar_bar_representation.position2 = np.array([ 0.09600313,  0.57926078])
                module_manager.scalar_lut_manager.scalar_bar_representation.position = np.array([ 0.89399687,  0.32073922])
                module_manager.scalar_lut_manager.number_of_labels = 9
                text = i.children[0].children[1]
                text.property.font_family = 'times'
                text.property.color = (0.0, 0.0, 0.0)
                # Filled contours at 0.25 m intervals.
                surface.enable_contours = True
                surface.contour.auto_contours = False
                surface.contour.contours = list(np.arange(-20,20,0.25))
                surface.contour.filled_contours = True
        # Red-grey-green colormap for erosion/sedimentation.
        cmap = [ (212, 53, 53), (215, 215, 215), (101, 194, 113)]
        self.change_repr('gridsurface', self.engine, colormap = cmap, range = [-10,10], show_bar = True)
        # Read and display the matching water-level grid.
        read2 = tvtk.XMLUnstructuredGridReader(file_name = self.file_locs2[frame])
        self.wlev = read2.get_output()
        mlab.pipeline.add_dataset(self.wlev, 'waterlevel')
        self.engine.add_module(Surface())
        # Uniform translucent blue for the water surface.
        cmap = [ (0, 0, 255), (0, 0, 255)]
        self.change_repr('waterlevel', self.engine, colormap = cmap, alpha = 0.2)
        for i in self.scene.children:
            if i.name == 'waterlevel':
                surface = i.children[0].children[0]
                surface.enable_contours = True
                surface.contour.auto_contours = False
                surface.contour.contours = list(np.arange(-20,20,0.5))
                surface.contour.filled_contours = True
        # Overlay the grid cell edges as a faint white wireframe.
        lines = read.get_output()
        mlab.pipeline.add_dataset(lines, 'gridlines')
        self.engine.add_module(Surface())
        cmap = [ (255, 255, 255), (255, 255, 255)]
        self.change_repr('gridlines', self.engine, colormap = cmap, alpha = 0.1, representation = 'wireframe')
        return self.count
    #==========================================================================
    # Function to create colormap
    #==========================================================================
    def create_colormap(self, col, nsteps):
        '''
        Creates a colormap based on an (uneven) number of input colors. This
        function basically makes a linspace between all the rgb values that
        are given, creating a colormap. Note that for now the number of colors
        should be uneven, or else the output will not have the right number of
        values.
        Parameters
        ----------
        colors: list with rgb tuples
        nsteps: length of the rgb array to be outputted
        Returns
        -------
        (nsteps, 3) shaped array with rgb values.
        '''
        colorlist = []
        # One linear segment per adjacent color pair.
        for i in range(len(col)-1):
            colorlist.append(np.c_[
                np.linspace(col[i][0], col[i+1][0], nsteps/(len(col)-1)),
                np.linspace(col[i][1], col[i+1][1], nsteps/(len(col)-1)),
                np.linspace(col[i][2], col[i+1][2], nsteps/(len(col)-1))])
        colors = np.vstack(colorlist)
        return colors
    #==========================================================================
    # Function to change layout properties
    #==========================================================================
    def change_repr(self, name, engine, representation = 'surface',
                    show_bar = False, alpha = 1.0, colormap = 'RdBu',
                    range = None):
        '''
        Change the layout of the unstructured grids on screen
        '''
        # Find the named dataset among the scene children and restyle it.
        for child in engine.scenes[0].children:
            if child.name == name:
                if representation != 'surface':
                    child.children[0].children[0].actor.property.\
                        representation = representation
                scl = child.children[0].scalar_lut_manager
                if show_bar:
                    scl.show_scalar_bar = True
                    scl.show_legend = True
                if isinstance(colormap, str):
                    # Named mayavi colormap.
                    if colormap != scl.lut_mode:
                        scl.lut_mode = colormap
                else:
                    # Custom list of rgb tuples: build a 256-entry LUT and
                    # append a fully opaque alpha column.
                    lut = self.create_colormap(colormap, 256)
                    scl.lut.table = np.hstack(
                        (lut, (255 * np.ones(len(lut)))[:,np.newaxis]))
                if range:
                    scl.data_range = np.array(range)
                    scl.use_default_range = False
                if alpha < 1.0:
                    # Make the whole colormap translucent.
                    lut = scl.lut.table.to_array()
                    lut[:, -1] = np.ones(256) * alpha * 255.
                    scl.lut.table = lut
#==============================================================================
# Camera class
#==============================================================================
class Camera:
    """Wraps the mayavi camera: focal point, focal range and movement."""

    def __init__(self, camera, start_pos, fig):
        print 'Camera created'
        self.position = camera.position
        self.fp = np.array(start_pos)  # array([x, y, z])
        # Vertical view angle of the camera, in degrees.
        self.va = camera.view_angle
        # The on screen crosshead
        self.crosshead = mlab.points3d(self.fp[0], self.fp[1], self.fp[2],
                                       mode = '2dcross', scale_factor = 1,
                                       color = (0, 0, 0), name = 'crosshead',
                                       figure = fig, transparent = True)
        mlab.view(focalpoint = self.fp, distance = 10000)
        self.update_geometry()
#        self.range_box = mlab.plot3d(self.fr[:,0], self.fr[:,1], self.fr[:,2],
#                                     color = (1, 0, 0), tube_radius = 50,
#                                     name = 'rangebox', figure = fig)
        self.update_screen()

    def update_geometry(self):
        '''
        Camera method to calculates the geometry of the view.
        This means the angles between the viewing line and the axes and the
        edges of the screen.
        '''
        # Get the position of the focal point and camera
        azm, elev, self.distance, self.fp = mlab.view()
        self.position, self.fp = mlab.move()
        # Convert angles to radians
        self.azm = azm * 2 * np.pi / 360.
        # mlab measures elevation from the zenith; convert to angle above
        # the horizontal plane.
        elev = 90 - elev
        self.elev = elev * 2 * np.pi / 360.
        # Calculate vertical distance between focal point and camera
        dz = self.position[2] - self.fp[2]
        # Calculate horizontal distances to screen bottom and top
        h_to_bottom = dz / math.tan(self.elev) - \
                      dz / math.tan(0.5*self.va/(180/math.pi)+self.elev)
        # Correct for top of screen not intersecting terrain in front of focal
        # point
        upper_angle = (self.elev - 0.5 * self.va/(180/math.pi))
        if upper_angle <= 0:
            # Looking (near) horizontally: cap the top distance at a large
            # multiple of the camera height instead of infinity.
            h_to_top = dz * 100 - dz / math.tan(self.elev)
        else:
            h_to_top = dz / math.tan(upper_angle) - dz / math.tan(self.elev)
        # Calculate absolute distances to screen bottom and top
        a_to_bottom = dz / math.sin(self.elev + 0.5 * self.va/(180/math.pi))
        # Correct for top of screen not intersecting terrain in front of focal
        # point
        if upper_angle <= 0:
            a_to_top = (10001*dz**2)**0.5
        else:
            a_to_top = dz / math.sin(self.elev - 0.5 * self.va/(180/math.pi))
        # Calculate horizontal distances to left and right edges of the screen
        # (0.9*va is used as the horizontal half-angle -- TODO confirm this
        # matches the actual window aspect ratio).
        bottom_to_edge = math.tan(0.9*self.va/(180/math.pi))*a_to_bottom
        top_to_edge = math.tan(0.9*self.va/(180/math.pi))*a_to_top
        # Calculate the edges of the focal range, and put them in an
        # x and y-array like: [leftdown, rightdown, rightup, leftup, leftdown]
        fr_x = np.array([self.fp[0]+math.cos(self.azm)*h_to_bottom +
                         math.sin(self.azm)*bottom_to_edge,
                         self.fp[0]+math.cos(self.azm)*h_to_bottom -
                         math.sin(self.azm)*bottom_to_edge,
                         self.fp[0]-math.cos(self.azm)*h_to_top -
                         math.sin(self.azm)*top_to_edge,
                         self.fp[0]-math.cos(self.azm)*h_to_top +
                         math.sin(self.azm)*top_to_edge,
                         self.fp[0]+math.cos(self.azm)*h_to_bottom +
                         math.sin(self.azm)*bottom_to_edge])
        fr_y = np.array([self.fp[1]+math.sin(self.azm)*h_to_bottom -
                         math.cos(self.azm)*bottom_to_edge,
                         self.fp[1]+math.sin(self.azm)*h_to_bottom +
                         math.cos(self.azm)*bottom_to_edge,
                         self.fp[1]-math.sin(self.azm)*h_to_top +
                         math.cos(self.azm)*top_to_edge,
                         self.fp[1]-math.sin(self.azm)*h_to_top -
                         math.cos(self.azm)*top_to_edge,
                         self.fp[1]+math.sin(self.azm)*h_to_bottom -
                         math.cos(self.azm)*bottom_to_edge])
        # Create a 3 by 5 array to with the 5 (2x point 1) focal range points
        # (z is fixed at 30 m for display purposes).
        self.fr = np.transpose(np.vstack((fr_x, fr_y, 30*np.ones(5))))

    def update_screen(self):
        '''
        Camera method which updates the focal point and focal range on screen.
        '''
        self.crosshead.mlab_source.dataset.points = [tuple(self.fp)]
#        self.range_box.mlab_source.dataset.points = self.fr

    def move(self, direction):
        '''
        Camera method to move the camera. The direction is determined by key-
        presses. The stepsize is determined with the mpf (meters per frame)
        keyword argument.
        Parameters
        ----------
        direction : direction in which the camera should move
        '''
        # Set the step size per frame (meters per frame)
        # Step scales with camera height so panning feels uniform.
        mpf = self.position[2] / 10.
        # Move the camera depending on the received direction
        if direction == 'up':
            mlab.view(focalpoint = self.fp+[-mpf*math.cos(self.azm),
                      -mpf*math.sin(self.azm), 0], distance =self.distance)
        elif direction == 'left':
            mlab.view(focalpoint = self.fp+[ mpf*math.sin(self.azm),
                      -mpf*math.cos(self.azm), 0], distance =self.distance)
        elif direction == 'right':
            mlab.view(focalpoint = self.fp+[-mpf*math.sin(self.azm),
                      mpf*math.cos(self.azm), 0], distance =self.distance)
        elif direction == 'down':
            mlab.view(focalpoint = self.fp+[ mpf*math.cos(self.azm),
                      mpf*math.sin(self.azm), 0], distance =self.distance)
#==============================================================================
# Make visualizer class
#==============================================================================
class Visualizer:
'''
The visualizer is a class which processes the data viewed on the window.
It has a camera instance and tilegrid instance, to manage the data and
checks which data must be displayed, and which data must not. It can be
seen as the manager of the program.
Parameters
----------
tiledict : A python dictionary which contains the metadata of the tiles.
start_position : The start position of the focal range. Makes sure that the
camera will start at the right position, or a specifically chosen
position.
'''
    def __init__(self, tiledict, start_position, total_points, show_lidar, mask = False):
        # total_points: global point budget divided over the visible tiles.
        # show_lidar: when False, only model results are displayed.
        print 'Visualizer created'
        self.fig = mlab.gcf()
        # Terrain-style interaction (pan/rotate around the up axis).
        istyle = tvtk.InteractorStyleTerrain()
        self.iactor = self.fig.scene.interactor
        self.iactor.interactor_style = istyle
        # Add interaction observers
        istyle.add_observer('KeyPressEvent', self.Callback)
        istyle.add_observer('EndInteractionEvent', self.Callback)
        istyle.add_observer('InteractionEvent', self.Callback)
        istyle.add_observer('MouseWheelBackwardEvent', self.Callback)
        istyle.add_observer('MouseWheelForwardEvent', self.Callback)
        # Total number of points
        self.total_points = total_points
        # The first tile from the dictionary is taken as the active tile
        self.activetiles = set()
        # The scene is where all the data (children) are added to and the
        # camera is in
        self.engine = mlab.get_engine()
        self.scene = self.engine.scenes[0]
        # Set background color
        self.scene.scene.background = (208/255., 238/255., 232/255.)
        # The camera is gives the view on the data
        self.camera = Camera(self.scene.scene.camera, start_position,
                             self.fig)
        self.show_lidar = show_lidar
        if self.show_lidar:
            self.tilegrid = Tilegrid(tiledict, mask)
            print 'Tilegrid created'
        self.model_results = ModelResults(glob.glob(foldername+'Grids\erosion\*.xml'),
                                          self.engine, self.scene)
        self.use_mask = mask
        # Accumulated profiling timers for the update loop.
        self.time1 = 0 # Positions
        self.time2 = 0 # Tiles in range
        self.time3 = 0 # Proportions
        self.time4 = 0 # Adding tiles
        self.time5 = 0 # Removing tiles
        self.time6 = 0 # Updating tiles
        self.time7 = 0 # Going trough list
        self.update_screen_data()
# Define function for updating dataset
    def update_screen_data(self):
        '''
        Visualizer method which updates the topographic data which is viewed.
        It adds or removes tiles with data depending on the focal range of the
        camera.
        '''
        start = timeit.default_timer()
        # Tags of the tiles that should currently be visible.
        new = self.get_big_box()
#        print ' Getting big box took:',timeit.default_timer() - start,'s.'
        self.time2 += (timeit.default_timer() - start)
        if self.show_lidar:
            start = timeit.default_timer()
            # Redistribute the point budget over the visible tiles.
            self.tilegrid.update_proportions(self.camera.position, new, self.total_points)
#            print ' Getting new proportions:',timeit.default_timer() - start,'s.'
            self.time3 += (timeit.default_timer() - start)
            # Add set
            # Tiles that became visible since the last update.
            add = new.difference(self.activetiles)
            for i in list(add):
                start = timeit.default_timer()
                j = self.tilegrid.tags.index(i)
                self.time7 += (timeit.default_timer() - start)
                start = timeit.default_timer()
                self.tilegrid.tiles[j].load_data_to_screen(self.engine, self.fig,
                                                           self.tilegrid.number_of_visible_points[j])
                self.time4 += (timeit.default_timer() - start)
            start = timeit.default_timer()
            # Remove sets
            # Tiles that dropped out of the focal range; their scene nodes
            # are looked up by tile tag.
            remove = self.activetiles.difference(new)
            for i in list(remove):
                for j in self.scene.children:
                    if j.name == i:
                        j.remove()
            self.activetiles = new
            self.time5 += (timeit.default_timer() - start)
            start = timeit.default_timer()
            # Update sets
            for i in list(new):
                j = self.tilegrid.tags.index(i)
                tile = self.tilegrid.tiles[j]
                new_lvl = tile.get_level(self.tilegrid.number_of_visible_points[j])
                if new_lvl != tile.lvl:
                    # Detail level changed: reload the tile at the new level.
                    for children in self.scene.children:
                        if children.name == i:
                            children.remove()
                    tile.load_data_to_screen(self.engine, self.fig,
                                             self.tilegrid.number_of_visible_points[j])
#                    tile.data_source.reset(x = tile.xyz[new_lvl][:, 0], y = tile.xyz[new_lvl][:, 1], z = tile.xyz[new_lvl][:, 2])
                if self.use_mask:
                    # If the differences are small, only update the mask
                    if (self.tilegrid.number_of_visible_points[j] > tile.mask.filter.maximum_number_of_points * 1.11 or
                        self.tilegrid.number_of_visible_points[j] < tile.mask.filter.maximum_number_of_points * 0.90):
                        start = timeit.default_timer()
                        tile.mask.filter.maximum_number_of_points = int(self.tilegrid.number_of_visible_points[j])
#                        print 'Adjusting mask took:',timeit.default_timer() - start
                        self.time6 += (timeit.default_timer() - start)
#                print tile..tag, self.number_of_visible_points[j], tile.number_of_loaded_points,'Refreashing dataset'
#                print self.npoints[j]
def point_in_triangles(self, tr1, tr2, p):
'''
Function which checks if a certain coordinate lies within one of
two triangles. The two triangles form the focal range, so this
fucntion checks which tiles are in the focal range.
The four point per tile which are checked, are the corner point.
This means that an edge of the focal range can be within a tile,
without crossing the corner point. Therefor the corner_in_tile
function is defined below.
This function is bases on a the script of Perro Azul available on:
http://jsfiddle.net/PerroAZUL/zdaY8/1/
Parameters
----------
tr1 : triangle 1. 2 columns (x, y) and 3 rows (3 coordinates)
tr2 : triangle 2. 2 columns (x, y) and 3 rows (3 coordinates)
p : array of points to be checked
Returns
-------
out : Array with bool for every 4 points. 4 points since it is assumed
that 4 subsequent point belong to 1 square.
'''
# Calculate the areas of the two triangles
A1 = (0.5 * (-tr1[1,1] * tr1[2,0] + tr1[0,1] * (-tr1[1,0] +
tr1[2,0]) + tr1[0,0] * (tr1[1,1] - tr1[2,1]) + tr1[1,0] *
tr1[2,1]))
A2 = (0.5 * (-tr2[1,1] * tr2[2,0] + tr2[0,1] * (-tr2[1,0] +
tr2[2,0]) + tr2[0,0] * (tr2[1,1] - tr2[2,1]) + tr2[1,0] *
tr2[2,1]))
# Check whether the area is negative or positive. The area can be
# negative when the points are defined (anti-)clockwise
sign1 = -1 if A1 < 0 else 1
sign2 = -1 if A2 < 0 else 1
s1 = (tr1[0,1] * tr1[2,0] - tr1[0,0] * tr1[2,1] + (tr1[2,1] -
tr1[0,1]) * p[:,0] + (tr1[0,0] - tr1[2,0]) * p[:,1]) * sign1
s2 = (tr2[0,1] * tr2[2,0] - tr2[0,0] * tr2[2,1] + (tr2[2,1] -
tr2[0,1]) * p[:,0] + (tr2[0,0] - tr2[2,0]) * p[:,1]) * sign2
t1 = (tr1[0,0] * tr1[1,1] - tr1[0,1] * tr1[1,0] + (tr1[0,1] -
tr1[1,1]) * p[:,0] + (tr1[1,0] - tr1[0,0]) * p[:,1]) * sign1
t2 = (tr2[0,0] * tr2[1,1] - tr2[0,1] * tr2[1,0] + (tr2[0,1] -
tr2[1,1]) * p[:,0] + (tr2[1,0] - tr2[0,0]) * p[:,1]) * sign2
# Check whether s>0 and t>0 and (s+t)<2*A
check1 = (np.where(s1>0, True, False) * np.where(t1>0, True, False)
* np.where((s1+t1) < 2*A1*sign1, True, False))
check2 = (np.where(s2>0, True, False) * np.where(t2>0, True, False)
* np.where((s2+t2) < 2*A2*sign2, True, False))
# Add checks for the two triangles
check = check1 + check2
# Reshape and take the horizontal sum, since only one in four
# (edges) point need to be within a triangle to display the tile
check = check.reshape(len(check)/4, 4)
check = np.sum(check, axis = 1) > 0
return check
def corner_in_tile(self, edges, keys):
'''
Function which checks which tile is covered by the corner point.
This function is made to check for the tiles which are in the
focal range, but are not found with the point_in_triangle function.
'''
corners = self.camera.fr[:4, :2]
cornertiles = set()
# Check which tile is around a corner point
for i in range(4): # Always four corners
index = ((corners[i, 0] > edges[:, 0]) *
(corners[i, 0] < edges[:, 2]) *
(corners[i, 1] > edges[:, 1]) *
(corners[i, 1] < edges[:, 3]))
# Check if the corner is in a tile
if list(np.nonzero(index)[0]):
# Add tile to cornertiles set
for j in np.nonzero(index)[0]:
tile = keys[int(j)]
cornertiles.add(tile)
return cornertiles
def get_big_box(self):
'''
This function calculates which tiles should fit in the view
'''
# Get triangle coordinates from focal range
triangle1 = self.camera.fr[:3, :2] # [ left_bot, right_bot, right_top]
triangle2 = self.camera.fr[-3:, :2] # [ right_top, left_top, left_bot]
# Call the point_in_triangles function to get the covered tiles
if self.show_lidar:
covered_pc1 = self.point_in_triangles(triangle1, triangle2, self.tilegrid.corners)
covered_pc2 = self.corner_in_tile(np.array(self.tilegrid.dictionary.values()), self.tilegrid.dictionary.keys())
# Make a set with the tiles which should be displayed
if self.show_lidar:
new_pc_tiles = set(map(self.tilegrid.tags.__getitem__,
list(np.where(covered_pc1)[0])))
new_pc_tiles = new_pc_tiles.union(covered_pc2)
if self.show_lidar:
return new_pc_tiles
def Callback(self, obj, event):
    '''
    Visualizer method which processes the VTK interaction events.

    obj   -- the interactor object that fired the event (used to read
             key codes on KeyPressEvent).
    event -- event name string ('InteractionEvent', 'KeyPressEvent',
             'MouseWheelForwardEvent', 'MouseWheelBackwardEvent').

    Side effects: moves the camera, changes the displayed point budget,
    steps through model results, dumps movie frames, and refreshes the
    screen/data after every non-interaction event.
    '''
    # self.scene.scene.disable_render = True
    if event == 'InteractionEvent':
        # Mouse-driven interaction: only the camera geometry needs a refresh.
        self.camera.update_geometry()
        # self.camera.update_screen()
    else:
        if event == 'KeyPressEvent':
            # First move the focal point and camera
            key = obj.GetInteractor().GetKeyCode()
            # Numpad-style navigation keys.
            if key == '8':
                self.camera.move('up')
            if key == '4':
                self.camera.move('left')
            if key == '6':
                self.camera.move('right')
            if key == '5':
                self.camera.move('down')
            # Keys 1/2/7/9 shrink or grow the point budget.
            if key == '1':
                self.total_points /= 10
            if key == '2':
                self.total_points /= 2
            if key == '7':
                self.total_points *= 2
            if key == '9':
                self.total_points *= 10
            print self.total_points
            # For looping through data
            if key == '.':
                count = self.model_results.change_data('forward')
                mlab.view(focalpoint=[46634, 372930, 0.0], azimuth = 94.01, elevation = 82.45, distance=1852)
                # mlab.view(focalpoint=[47210, 374628, 0.0], azimuth = 160, elevation = 78.76, distance=10000)
            if key == ',':
                count = self.model_results.change_data('back')
                mlab.view(focalpoint=[46634, 372930, 0.0], azimuth = 94.01, elevation = 82.45, distance=1852)
                # mlab.view(focalpoint=[47210, 374628, 0.0], azimuth = 160, elevation = 78.76, distance=10000)
            # Get position properties
            if key == 'g':
                az, el, dst, fp = mlab.view()
                print '--------\nAzimuth: {}\nElevation: {}\nDistance: {}\nFocal point: x={}, y={}, z={}'.format(az, el, dst, fp[0], fp[1], fp[2])
            # Make a movie: render each timestep off-screen and save a
            # numbered snapshot (hard-coded frame count and output path).
            if key == 'm':
                self.scene.scene.anti_aliasing_frames = 0
                self.engine.current_scene.scene.off_screen_rendering = True
                for i in range(285):
                    self.model_results.change_data(i)
                    # mlab.view(focalpoint=[47210, 374628, 0.0], azimuth = 160, elevation = 78.76, distance=10000)
                    mlab.view(focalpoint=[46634, 372930, 0.0], azimuth = 94.01, elevation = 82.45, distance=1852)
                    self.scene.scene.save(r'C:\Users\rongen\Desktop\Video\turningcamera\snapshot{:03d}.png'.format(i))
                    time.sleep(1)
        # Zoom in/out by scaling the camera distance (0.9 and 1.111 are
        # roughly reciprocal, so one wheel click each way round-trips).
        if event == 'MouseWheelForwardEvent':
            self.camera.distance = self.camera.distance * 0.9
            mlab.view(focalpoint=self.camera.fp, distance=self.camera.distance)
        if event == 'MouseWheelBackwardEvent':
            self.camera.distance = self.camera.distance * 1.111
            mlab.view(focalpoint=self.camera.fp, distance=self.camera.distance)
        # After any key/wheel event: refresh geometry, screen and data,
        # timing each stage for diagnostics.
        start = timeit.default_timer()
        self.camera.update_geometry()
        # print '=======================================\nUpdating geometry took:',timeit.default_timer()-start,'s.'
        self.time1 += (timeit.default_timer() - start)
        start = timeit.default_timer()
        self.camera.update_screen()
        print 'Updating screen took:',timeit.default_timer()-start,'s.'
        start = timeit.default_timer()
        self.update_screen_data()
        print 'Updating data took:',timeit.default_timer()-start,'s.'
    # self.scene.scene.disable_render = False
#==============================================================================
# Initial conditions:
#==============================================================================
# Starting focal point in the scene's coordinate system (x, y, z) --
# presumably Dutch RD coordinates given the magnitudes; TODO confirm.
start_position_input = [32000, 385000, 0]
# Build the visualizer with a 30M point budget, lidar display on and no
# mask, then hand control to the mayavi event loop.
# NOTE(review): `tiledictinp` and `Visualizer` are defined earlier in the
# full module, outside this excerpt.
vis = Visualizer(tiledictinp, start_position_input, total_points = 30000000, show_lidar = True, mask = False)
mlab.show()
|
grongen/point-cloud-visualization
|
plotcloud3d.py
|
Python
|
gpl-3.0
| 36,535
|
[
"Mayavi"
] |
b6070f7f4269871aeae6957425294fae730271a7078732d4b3b7853c708eb8e6
|
# ---------------------------------------------------------------------------
# Exploratory Python 2 script: reads a C. elegans tracking experiment stored
# as a MATLAB v7.3 (HDF5) .mat file and pokes at its structure with h5py.
# Statements are deliberately left at top level (console-log style); many
# lines only inspect values and discard the result.
# ---------------------------------------------------------------------------
# Loading libraries
from scipy.io import loadmat
import h5py
from time import strptime
from calendar import timegm
import pysam
print pysam.__version__
print pysam.__file__
# /nfs/software/cn/el6.5/python/envs/.virtualenvs/cpython279/lib/python2.7/site-packages/RSeQC-2.6.2-py2.7-linux-x86_64.egg
# pip install --target="/nfs/software/cn/el6.5/python/envs/.virtualenvs/cpython279/lib/python2.7/" pysam
# pip install --target="/nfs/software/cn/el6.5/python/envs/.virtualenvs/cpython279/lib/python2.7/site-packages/" pysam
# At the end the solution was to chmod by Pablo and desinstall the pysam version inside RSeQC
## Files are in:
# ftp://anonymous:@ftp.mrc-lmb.cam.ac.uk/pub/tjucikas/wormdatabase
## I copied all animals from JU440 into:
## ~/2016_worm_DB/30m_wait/
## then from ~/2016_worm_DB I create ju440_all folder and use this command
# find . -name \*.mat -exec cp {} ju440_all \;
input_file = '/Users/jespinosa/2016_worm_DB/ju440_all/575 JU440 on food L_2011_02_17__11_00___3___1_features.mat'
# worm_data = loadmat(input_file)
# v7.3 .mat files are HDF5 containers, so h5py (not loadmat) is used.
f = h5py.File(input_file)
f.keys()
f.id
f.ref
f.attrs.keys()
f['info'].keys()
## TO get the structure GO TO command line and type:
## h5ls -vlr "/Users/jespinosa/git/pergola/test/c_elegans_data_test/575 JU440 on food L_2011_02_17__11_00___3___1_features.mat"
### INFO
sex_r = f['info']['experiment']['worm']['sex']
# /info/experiment/worm/sex
## How to extract char
## http://stackoverflow.com/questions/12036304/loading-hdf5-matlab-strings-into-python
# for c in sex_r:
#     print c
#     print unichr(c)
# MATLAB stores strings as arrays of uint16 code points; decode with
# unichr (Python 2 only).
sex = [''.join(unichr(c) for c in sex_r)]
sex = str(''.join(unichr(c) for c in sex_r))
# /info/experiment/worm/habituation (time of habituation)
# i.e. there is a lapse of time within which they don't track animals:
# "We observed a 30-min wait, before tracking, to allow worms to habituate
# after being picked and moved to their tracking plate"
habituation_r = f['info']['experiment']['worm']['habituation']
habituation = [''.join(unichr(c) for c in habituation_r)]
habituation = str(''.join(unichr(c) for c in habituation_r))
f['info']['experiment']['environment'].keys() # [u'annotations', u'arena', u'chemicals', u'food',
# u'illumination', u'temperature', u'timestamp', u'tracker']
# annotations (empty)
annotations_r = f['info']['experiment']['environment']['annotations']
annotations = [''.join(c.astype(str) for c in annotations_r)]
annotations = str(''.join(c.astype(str) for c in annotations_r))
# info/experiment/worm/genotype
genotype_r = f['info']['experiment']['worm']['genotype'] #type u2
genotype = [''.join(unichr(c) for c in genotype_r)]
genotype = str(''.join(unichr(c) for c in genotype_r))
# /info/experiment/worm/strain
strain_r = f['info']['experiment']['worm']['strain']
strain = str(''.join(unichr(c) for c in strain_r))
# age worm
# /info/experiment/worm/age
age_r = f['info']['experiment']['worm']['age'] #type u2
age = str(''.join(unichr(c) for c in age_r))
# /info/experiment/environment/food
food_r = f['info']['experiment']['environment']['food'] #type u2
food = str(''.join(unichr(c) for c in food_r))
# /info/experiment/environment/timestamp
timestamp_r = f['info']['experiment']['environment']['timestamp'] #type u2
timestamp = str(''.join(unichr(c) for c in timestamp_r))
# HH:MM:SS.mmmmmm
# NOTE(review): time.strptime does not support %f (microseconds); this
# works only if the stored timestamp actually matches -- TODO confirm.
my_date_object = strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
unix_time = timegm(my_date_object) # utc based # correct!!!
# /info/video/length/time
time_recorded_r = f['info']['video']['length']['time']
time_recorded = time_recorded_r[0][0]
# /info/video/length/frames
frames_r = f['info']['video']['length']['frames']
# NOTE(review): the frame count is bound to the name `time`, shadowing
# any `time` module import in the full file.
time = frames_r[0][0]
##############
## WORM DATA
f['worm'].keys() # [u'locomotion', u'morphology', u'path', u'posture']
f['worm']['locomotion'].keys()
f['worm']['locomotion']['velocity'].keys() # [u'head', u'headTip', u'midbody', u'tail', u'tailTip']
# These primary features were also evaluated in different contexts to give more complex parameterization:
# for example, mean speed was measured over the entire video as well as independently for periods when the
# animal was moving either forward or backward. Likewise, dorsal and ventral bending were measured over the
# entire body and in specific regions such as the head, tail and midbody. Finally, specific behavioral events
# such as reversals or omega turns were used to generate secondary parameters, such as the frequency, time spent
# in execution and distance covered during the event.
# The idea would be, for example, to build a bed file for forward/backward
# and another one for velocity, and then run an analysis similar to the paper's,
# e.g. comparing whether the velocities are equal.
tail_v = f['worm']['locomotion']['velocity']['tail']['speed']
midbody_v = f['worm']['locomotion']['velocity']['midbody']['speed']
head_v = f['worm']['locomotion']['velocity']['head']['speed']
# NOTE(review): tail_v is immediately overwritten with the tailTip speed
# here -- the plain-tail assignment above is dead.
tail_v = f['worm']['locomotion']['velocity']['tailTip']['speed']
headTip_v = f['worm']['locomotion']['velocity']['headTip']['speed']
tailTip_v = f['worm']['locomotion']['velocity']['tailTip']['speed']
len(tail_v)
len(midbody_v)
len(head_v)
len(tail_v)
f['worm']['path'].keys() # [u'coordinates', u'curvature', u'duration', u'range']
f['worm']['path']['duration'].keys() # [u'arena', u'head', u'midbody', u'tail', u'worm']
f['worm']['path']['duration']['arena'].keys() #[u'height', u'max', u'min', u'width']
f['worm']['path']['duration']['arena']['min'].keys() # x y
f['worm']['path']['duration']['arena']['min']['x'][0]
f['worm']['path']['duration']['arena']['min']['y'][0]
f['worm']['path']['duration']['arena']['max']['x'][0]
f['worm']['path']['duration']['arena']['max']['y'][0]
f['worm']['path']['duration']['head'].keys() # [u'indices', u'times']
f['worm']['path']['duration']['head']['indices']
f['worm']['path']['duration']['head']['times'][0][1]
f['worm']['path']['duration']['head']['indices'][0][1]
times = f['worm']['path']['duration']['head']['times'][0]
len(times)
times[0]
##
f['worm']['path']['duration']['arena']
## all three are the same length: 26995
head_v[200:300]
tailTip_v[200:300]
headTip_v[200:300]
tailTip_v[200][0] #nan
# Is this nan related with mode or paused
# NOTE(review): `mode_l` is used here but only defined further down --
# this line works only in an interactive session run out of order.
mode_l[200] #no
# NOTE(review): same for `start_paused`, defined below.
start_paused # no is when the animals stop moving
f['worm']['locomotion']['velocity'].keys() # [u'head', u'headTip', u'midbody', u'tail', u'tailTip']
for i in midbody_v[1:40]: print i
# Frames contain the frames recorded during the experiment
# NOTE(review): `frames` is never defined in this script (only
# `frames_r`); this line would raise NameError when run top-to-bottom.
frames
len(midbody_v) # 26995 these are the frames
# web frames 26995
time_recorded # 898.932
898.932 / 60 # = 14.9822
# min x 60s/1min
14.9822 * 60 # = 898.932
# web FPS 30.03
898.932 * 30.03 #= 26994.92796 #it match OK!!!
## Info
f['info'].keys()
f['info']['video'].keys()
f['info']['video']['resolution'].keys()
fps = f['info']['video']['resolution']['fps'][0][0]
width_pix = f['info']['video']['resolution']['width'][0][0]
26995/640
## Locomotion data (forward, backward, etc)
f['worm']['locomotion'].keys()
f['worm']['locomotion']['motion'].keys() # [u'backward', u'forward', u'mode', u'paused']
f['worm']['locomotion']['motion']['forward'].keys() # [u'frames', u'frequency', u'ratio']
f['worm']['locomotion']['motion']['forward']['frames'].keys() # [u'distance', u'end', u'interDistance', u'interTime', u'start', u'time']
f['worm']['locomotion']['motion']['forward']['frames']['distance'][0][0]
#motion
start_for_r = f['worm']['locomotion']['motion']['forward']['frames']['start']
end_for_r = f['worm']['locomotion']['motion']['forward']['frames']['end']
start_back_r = f['worm']['locomotion']['motion']['backward']['frames']['start']
end_back_r = f['worm']['locomotion']['motion']['backward']['frames']['end']
# paused
start_paused_r = f['worm']['locomotion']['motion']['paused']['frames']['start']
end_paused_r = f['worm']['locomotion']['motion']['paused']['frames']['end']
# The start/end datasets hold HDF5 object references; dereference each
# through the file handle to get the scalar frame index.
end_paused = list()
for element in end_paused_r:
    end_paused.append(f[element[0]][0][0])
end_paused[0]
start_paused = list()
for element in start_paused_r:
    start_paused.append(f[element[0]][0][0])
# NOTE(review): `end_for`, `start_for`, `start_back` and `end_back` are
# used below but never built from their *_r references (unlike the
# paused lists above) -- NameError when run top-to-bottom.
end_for[-1]
start_for[-1]
start_for[0] # 17; in midbody it is 17+1
start_back[0] #274
end_back[0] #299
start_paused[0] #860
((0.30*3 + 0.20*1 + 0.10*2)/100)/2
((0.30*3 + 0.20*1 + 0.10*2)/100)/4
midbody_v[0:12]
midbody_v[0:17] # it is exactly the index without summing or substracting 1
midbody_v[0:18]
# What is mode
mode_l = f['worm']['locomotion']['motion']['mode']
mode_l[-20]
# mode_l = list()
# for element in mode_r:
#     mode_l.append(f[element[0]][0][0])
len(midbody_v)
midbody_v[start_for[-1]: end_for[-1]]
midbody_v[1:3]
midbody_v[26580: 26994]
midbody_v[26579]
## WORKING
# http://stackoverflow.com/questions/27670149/read-matlab-v7-3-file-into-python-list-of-numpy-arrays-via-h5py
# NOTE(review): `end` is undefined here; presumably one of the *_r
# reference arrays above was meant.
end_t = [f[element[0]][:] for element in end]
end_t
# The C. elegans data are on the web page when you click "show all".
sex
habituation # 30 minutes
annotations
genotype
strain
age
food
unix_time
fps
##############
## WORM DATA
# turns
# /worm/locomotion/turns/omegas/frames/start
# start_omegas_r = f['worm']['locomotion']['turns']['omegas']['frames']['start'][0][0]
# end_omegas_r = f['worm']['locomotion']['turns']['omegas']['frames']['end'][0][0]
# start_upsilons_r = f['worm']['locomotion']['turns']['upsilons']['frames']['start'][0][0]
# start_upsilons_r = f['worm']['locomotion']['turns']['upsilons']['frames']['start'][0][1]
## This is how it works
# f['worm']['locomotion']['turns'].keys() # [u'omegas', u'upsilons']
# end_upsilons_r = f['worm']['locomotion']['turns']['upsilons']['frames']['end'][0][0]
# end_upsilons_r = f['worm']['locomotion']['turns']['upsilons']['frames']['end'][0][1]
# f[end_upsilons_r][0][0]
# ary_start_upsilons_r = f['worm']['locomotion']['turns']['upsilons']['frames']['start'][0]
# ary_end_upsilons_r = f['worm']['locomotion']['turns']['upsilons']['frames']['end'][0]
# ary_refs = ary_end_upsilons_r
|
cbcrg/pergola
|
test/c_elegans_data_test/read_worm_data_dev.py
|
Python
|
gpl-3.0
| 9,914
|
[
"pysam"
] |
898c6eb17c7e91b9dd4a83325bb7a50cb3e0e974696b87f9185ccf3bc3b8eb61
|
#!/usr/bin/python3
"""perceptron.py: Predicts whether some data belongs to one class or another."""
__author__ = 'Andrei Muntean'
__license__ = 'MIT License'
import numpy as np
class Perceptron:
    """Binary perceptron classifier.

    Learns a separating hyperplane for two classes with the classic
    Rosenblatt update rule: whenever a training example is misclassified,
    the weights and bias are nudged toward it.
    """

    def __init__(self, learning_rate = 1):
        # Scales the magnitude of each weight/bias correction.
        self.learning_rate = learning_rate

    def train(self, data, labels, maximum_iterations = 100000):
        """Fit the perceptron on `data` (one feature vector per row).

        labels must contain exactly two distinct values; their sorted
        order defines the internal signals 0 and 1. Stops early (and
        prints a message) once a full pass produces no errors; otherwise
        runs for `maximum_iterations` epochs.
        """
        # Stores the sorted unique labels; indices into this array are
        # the internal "signals".
        self.labels = np.unique(labels)
        # The weights represent the orientation of a (feature vector size - 1)-dimensional hyperplane.
        self.weights = np.random.randn(data.shape[1])
        # The bias determines the offset of the hyperplane.
        self.bias = 0
        # The hyperplane will be adjusted so that it separates feature vectors into one of two classes.
        for iteration in range(maximum_iterations):
            error_count = 0
            # Goes through every example in lockstep with its label.
            for features, expected in zip(data, labels):
                predicted = self.predict(features)
                # Signed error: 0 when correct, otherwise +1 or -1.
                error = self.get_signal(expected) - self.get_signal(predicted)
                if error != 0:
                    # Rosenblatt update: move the hyperplane toward the example.
                    self.weights += self.learning_rate * error * features
                    self.bias += self.learning_rate * error
                    error_count += 1
            if error_count == 0:
                # Convergence; can no longer be optimized.
                print('Perceptron converged! ({0} iterations)'.format(iteration + 1))
                return

    def predict(self, features):
        """Return the predicted label for a single feature vector."""
        # Calculates a value which -- if sufficiently big -- fires the neuron.
        activator = np.dot(self.weights, features) + self.bias
        return self.get_label(1) if activator > 0 else self.get_label(0)

    def get_signal(self, label):
        """Return the internal index (0 or 1) of `label` in self.labels."""
        return np.where(self.labels == label)[0][0]

    def get_label(self, signal):
        """Inverse of get_signal: map an internal index back to its label."""
        return self.labels[signal]
|
andreimuntean/Perceptron
|
Perceptron/perceptron.py
|
Python
|
mit
| 1,909
|
[
"NEURON"
] |
986733d92e4aa67245e51e32e761c6813ce670a7b06ce8472b7155589f130665
|
# -*- coding: utf-8 -*-
#
# Name: face.com Python API client library
# Description: face.com REST API Python client library.
#
# For more information about the API and the return values,
# visit the official documentation at http://developers.face.com/docs/api/.
#
# Author: Tomaž Muraus (http://www.tomaz-muraus.info)
# License: GPL (http://www.gnu.org/licenses/gpl.html)
# Version: 1.0
import urllib
import urllib2
import json
import os.path
API_URL = 'http://api.face.com'
class FaceClient():
    """Minimal face.com REST API client.

    Every public method maps onto one API endpoint, returns the decoded
    JSON response as a dict, and raises FaceError when the API reports a
    failure status. Twitter/Facebook credentials, when required by the
    UIDs used, must be set up front via the set_* methods.
    """

    def __init__(self, api_key = None, api_secret = None):
        if not api_key or not api_secret:
            raise AttributeError('Missing api_key or api_secret argument')
        self.api_key = api_key
        self.api_secret = api_secret
        # Response format requested from the API.
        self.format = 'json'
        self.twitter_credentials = None
        self.facebook_credentials = None

    def set_twitter_user_credentials(self, user = None, password = None):
        """Store Twitter username/password credentials."""
        if not user or not password:
            raise AttributeError('Missing Twitter username or password')
        self.twitter_credentials = {'twitter_user': user,
                                    'twitter_password': password}

    def set_twitter_oauth_credentials(self, user = None, secret = None, token = None):
        """Store Twitter OAuth credentials (preferred over user/password)."""
        if not user or not secret or not token:
            raise AttributeError('Missing one of the required arguments')
        self.twitter_credentials = {'twitter_oauth_user': user,
                                    'twitter_oauth_secret': secret,
                                    'twitter_oauth_token': token}

    def set_facebook_credentials(self, user = None, session = None):
        """Store Facebook user/session credentials."""
        if not user or not session:
            raise AttributeError('Missing Facebook user or session argument')
        self.facebook_credentials = {'fb_user': user,
                                     'fb_session': session}

    ### Recognition engine methods ###
    def faces_detect(self, urls = None, file = None, aggressive=False):
        """
        Returns tags for detected faces in one or more photos, with geometric information
        of the tag, eyes, nose and mouth, as well as the gender, glasses, and smiling attributes.

        http://developers.face.com/docs/api/faces-detect/
        """
        if not urls and not file:
            raise AttributeError('Missing URLs/filename argument')
        if file:
            # Check if the file exists
            if not os.path.exists(file):
                raise IOError('File %s does not exist' % (file))
            data = {'file': file}
        else:
            data = {'urls': urls}
        if aggressive:
            data['detector'] = 'Aggressive'
        response = self.send_request('faces/detect', data)
        return response

    def faces_status(self, uids = None, namespace = None):
        """
        Reports training set status for the specified UIDs.

        http://developers.face.com/docs/api/faces-status/
        """
        if not uids:
            raise AttributeError('Missing user IDs')
        (facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
        data = {'uids': uids}
        self.__append_user_auth_data(data, facebook_uids, twitter_uids)
        self.__append_optional_arguments(data, namespace = namespace)
        response = self.send_request('faces/status', data)
        return response

    def faces_recognize(self, uids = None, urls = None, file = None, train = None, \
                        namespace = None):
        """
        Attempts to detect and recognize one or more user IDs' faces, in one or more photos.
        For each detected face, the face.com engine will return the most likely user IDs,
        or empty result for unrecognized faces. In addition, each tag includes a threshold
        score - any score below this number is considered a low-probability hit.

        http://developers.face.com/docs/api/faces-recognize/
        """
        if not uids or (not urls and not file):
            raise AttributeError('Missing required arguments')
        (facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
        data = {'uids': uids}
        if file:
            # Check if the file exists
            if not os.path.exists(file):
                raise IOError('File %s does not exist' % (file))
            data.update({'file': file})
        else:
            data.update({'urls': urls})
        self.__append_user_auth_data(data, facebook_uids, twitter_uids)
        self.__append_optional_arguments(data, train = train, namespace = namespace)
        response = self.send_request('faces/recognize', data)
        return response

    def faces_train(self, uids = None, namespace = None):
        """
        Calls the training procedure for the specified UIDs, and reports back changes.

        http://developers.face.com/docs/api/faces-train/
        """
        if not uids:
            raise AttributeError('Missing user IDs')
        (facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
        data = {'uids': uids}
        self.__append_user_auth_data(data, facebook_uids, twitter_uids)
        self.__append_optional_arguments(data, namespace = namespace)
        response = self.send_request('faces/train', data)
        return response

    ### Methods for managing face tags ###
    def tags_get(self, uids = None, urls = None, pids = None, order = 'recent', \
                 limit = 5, together = False, filter = None, namespace = None):
        """
        Returns saved tags in one or more photos, or for the specified User ID(s).
        This method also accepts multiple filters for finding tags corresponding to
        a more specific criteria such as front-facing, recent, or where two or more
        users appear together in same photos.

        http://developers.face.com/docs/api/tags-get/
        """
        (facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uids)
        data = {'uids': uids,
                'urls': urls,
                'together': together,
                'limit': limit}
        self.__append_user_auth_data(data, facebook_uids, twitter_uids)
        self.__append_optional_arguments(data, pids = pids, filter = filter, \
                                         namespace = namespace)
        response = self.send_request('tags/get', data)
        return response

    def tags_add(self, url = None, x = None, y = None, width = None, uid = None, \
                 tagger_id = None, label = None, password = None):
        """
        Add a (manual) face tag to a photo. Use this method to add face tags where
        those were not detected for completeness of your service.

        http://developers.face.com/docs/api/tags-add/
        """
        if not url or not x or not y or not width or not uid or not tagger_id:
            raise AttributeError('Missing one of the required arguments')
        (facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
        data = {'url': url,
                'x': x,
                'y': y,
                'width': width,
                'uid': uid,
                'tagger_id': tagger_id}
        self.__append_user_auth_data(data, facebook_uids, twitter_uids)
        self.__append_optional_arguments(data, label = label, password = password)
        response = self.send_request('tags/add', data)
        return response

    def tags_save(self, tids = None, uid = None, tagger_id = None, label = None, \
                  password = None):
        """
        Saves a face tag. Use this method to save tags for training the face.com
        index, or for future use of the faces.detect and tags.get methods.

        http://developers.face.com/docs/api/tags-save/
        """
        if not tids or not uid:
            raise AttributeError('Missing required argument')
        (facebook_uids, twitter_uids) = self.__check_user_auth_credentials(uid)
        data = {'tids': tids,
                'uid': uid}
        self.__append_user_auth_data(data, facebook_uids, twitter_uids)
        self.__append_optional_arguments(data, tagger_id = tagger_id, label = label, \
                                         password = password)
        response = self.send_request('tags/save', data)
        return response

    def tags_remove(self, tids = None, password = None):
        """
        Remove a previously saved face tag from a photo.

        http://developers.face.com/docs/api/tags-remove/
        """
        if not tids:
            raise AttributeError('Missing tag IDs')
        data = {'tids': tids}
        # BUGFIX: the optional password was previously accepted but never
        # sent to the API; forward it like tags_add/tags_save do.
        self.__append_optional_arguments(data, password = password)
        response = self.send_request('tags/remove', data)
        return response

    ### Account management methods ###
    def account_limits(self):
        """
        Returns current rate limits for the account represented by the passed API key and Secret.

        http://developers.face.com/docs/api/account-limits/
        """
        response = self.send_request('account/limits')
        return response['usage']

    def account_users(self, namespaces = None):
        """
        Returns the users registered in the account's namespaces.

        http://developers.face.com/docs/api/account-users/
        """
        if not namespaces:
            raise AttributeError('Missing namespaces argument')
        response = self.send_request('account/users', {'namespaces': namespaces})
        return response

    def __check_user_auth_credentials(self, uids):
        """Split UIDs by provider and verify matching credentials are set."""
        facebook_uids = [uid for uid in uids.split(',') \
                         if uid.find('@facebook.com') != -1]
        twitter_uids = [uid for uid in uids.split(',') \
                        if uid.find('@twitter.com') != -1]
        if facebook_uids and not self.facebook_credentials:
            raise AttributeError('You need to set Facebook credentials to perform action on Facebook users')
        if twitter_uids and not self.twitter_credentials:
            raise AttributeError('You need to set Twitter credentials to perform action on Twitter users')
        return (facebook_uids, twitter_uids)

    def __append_user_auth_data(self, data, facebook_uids, twitter_uids):
        """Attach the `user_auth` parameter required by provider-scoped UIDs."""
        if facebook_uids:
            data.update({'user_auth': 'fb_user:%s,fb_session:%s' % (self.facebook_credentials['fb_user'],
                                                                    self.facebook_credentials['fb_session'])})
        if twitter_uids:
            # If both user/password and OAuth credentials are provided, use
            # OAuth as default
            if self.twitter_credentials.get('twitter_oauth_user', None):
                data.update({'user_auth': 'twitter_oauth_user:%s,twitter_oauth_secret:%s,twitter_oauth_token:%s' %
                             (self.twitter_credentials['twitter_oauth_user'], self.twitter_credentials['twitter_oauth_secret'], \
                              self.twitter_credentials['twitter_oauth_token'])})
            else:
                data.update({'user_auth': 'twitter_user:%s,twitter_password:%s' % (self.twitter_credentials['twitter_user'],
                                                                                   self.twitter_credentials['twitter_password'])})

    def __append_optional_arguments(self, data, **kwargs):
        """Copy only the non-empty keyword arguments into `data`."""
        # .items() works on both Python 2 and 3 (iteritems() was Py2-only).
        for key, value in kwargs.items():
            if value:
                data.update({key: value})

    def send_request(self, method = None, parameters = None):
        """POST `parameters` to the given API method and decode the JSON reply.

        Raises FaceError when the API response carries a failure status.
        """
        url = '%s/%s' % (API_URL, method)
        data = {'api_key': self.api_key,
                'api_secret': self.api_secret,
                'format': self.format}
        if parameters:
            data.update(parameters)
        # Local file is provided, use a multi-part form.
        # BUGFIX: guard on `parameters` too -- `'file' in None` raised a
        # TypeError for parameter-less calls such as account_limits().
        if parameters and 'file' in parameters:
            from multipart import Multipart
            form = Multipart()
            for key, value in data.items():
                if key == 'file':
                    # BUGFIX: upload under the file's own basename; the
                    # original passed basename(key) -- the literal string
                    # 'file' -- as both the field name and filename. Also
                    # read in binary mode so image bytes are not mangled.
                    with open(value, 'rb') as fh:
                        form.file(os.path.basename(value),
                                  os.path.basename(value), fh.read())
                else:
                    form.field(key, value)
            (content_type, post_data) = form.get()
            headers = {'Content-Type': content_type}
        else:
            post_data = urllib.urlencode(data)
            headers = {}
        request = urllib2.Request(url, headers = headers, data = post_data)
        response = urllib2.urlopen(request)
        response = response.read()
        response_data = json.loads(response)
        if 'status' in response_data and \
           response_data['status'] == 'failure':
            raise FaceError(response_data['error_code'], response_data['error_message'])
        return response_data
class FaceError(Exception):
    """Raised when the face.com API returns a failure status.

    Carries the numeric error code alongside the message so callers can
    branch on the code programmatically.
    """

    def __init__(self, error_code, error_message):
        # Keep both pieces of the API's failure payload.
        self.error_code = error_code
        self.error_message = error_message

    def __str__(self):
        # Rendered as "message (code)", e.g. "invalid api key (101)".
        detail = (self.error_message, self.error_code)
        return '%s (%d)' % detail
|
pizzapanther/Church-Source
|
dependencies/face_client/face_client.py
|
Python
|
gpl-3.0
| 11,111
|
[
"VisIt"
] |
20795acb5d86cd330a04c445d1f789611522ac29764bc505dfc3404c6f0f4f81
|
from frappe import _
def get_data():
    """Return the CRM module's desk sidebar configuration.

    Each top-level dict describes one card (label, icon, items); each
    item links a doctype, query report, page or help video. All labels
    and descriptions go through frappe's `_` for translation.
    """
    return [
        # Core selling documents, from lead capture to customer record.
        {
            "label": _("Sales Pipeline"),
            "icon": "fa fa-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "Lead",
                    "description": _("Database of potential customers."),
                },
                {
                    "type": "doctype",
                    "name": "Opportunity",
                    "description": _("Potential opportunities for selling."),
                },
                {
                    "type": "doctype",
                    "name": "Customer",
                    "description": _("Customer database."),
                },
                {
                    "type": "doctype",
                    "name": "Contact",
                    "description": _("All Contacts."),
                },
            ]
        },
        # Query reports and the sales-funnel page.
        {
            "label": _("Reports"),
            "icon": "fa fa-list",
            "items": [
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Lead Details",
                    "doctype": "Lead"
                },
                {
                    "type": "page",
                    "name": "sales-funnel",
                    "label": _("Sales Funnel"),
                    "icon": "fa fa-bar-chart",
                },
                {
                    "type": "report",
                    "name": "Prospects Engaged But Not Converted",
                    "doctype": "Lead",
                    "is_query_report": True
                },
                {
                    "type": "report",
                    "name": "Minutes to First Response for Opportunity",
                    "doctype": "Opportunity",
                    "is_query_report": True
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Customer Addresses And Contacts",
                    "doctype": "Contact"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Inactive Customers",
                    "doctype": "Sales Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Campaign Efficiency",
                    "doctype": "Lead"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Lead Owner Efficiency",
                    "doctype": "Lead"
                }
            ]
        },
        # Unified communication log.
        {
            "label": _("Communication"),
            "icon": "fa fa-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "Communication",
                    "description": _("Record of all communications of type email, phone, chat, visit, etc."),
                },
            ]
        },
        # Master-data setup; tree doctypes link to their Tree view.
        {
            "label": _("Setup"),
            "icon": "fa fa-cog",
            "items": [
                {
                    "type": "doctype",
                    "name": "Campaign",
                    "description": _("Sales campaigns."),
                },
                {
                    "type": "doctype",
                    "label": _("Customer Group"),
                    "name": "Customer Group",
                    "icon": "fa fa-sitemap",
                    "link": "Tree/Customer Group",
                    "description": _("Manage Customer Group Tree."),
                },
                {
                    "type": "doctype",
                    "label": _("Territory"),
                    "name": "Territory",
                    "icon": "fa fa-sitemap",
                    "link": "Tree/Territory",
                    "description": _("Manage Territory Tree."),
                },
                {
                    "type": "doctype",
                    "label": _("Sales Person"),
                    "name": "Sales Person",
                    "icon": "fa fa-sitemap",
                    "link": "Tree/Sales Person",
                    "description": _("Manage Sales Person Tree."),
                },
                {
                    "type": "doctype",
                    "name": "Lead Source",
                    "description": _("Track Leads by Lead Source.")
                },
            ]
        },
        # SMS gateway tooling.
        {
            "label": _("SMS"),
            "icon": "fa fa-wrench",
            "items": [
                {
                    "type": "doctype",
                    "name": "SMS Center",
                    "description": _("Send mass SMS to your contacts"),
                },
                {
                    "type": "doctype",
                    "name": "SMS Log",
                    "description": _("Logs for maintaining sms delivery status"),
                },
                {
                    "type": "doctype",
                    "name": "SMS Settings",
                    "description": _("Setup SMS gateway settings")
                }
            ]
        },
        # Embedded help videos (YouTube IDs).
        {
            "label": _("Help"),
            "items": [
                {
                    "type": "help",
                    "label": _("Lead to Quotation"),
                    "youtube_id": "TxYX4r4JAKA"
                },
                {
                    "type": "help",
                    "label": _("Newsletters"),
                    "youtube_id": "muLKsCrrDRo"
                },
            ]
        },
    ]
|
patilsangram/erpnext
|
erpnext/config/crm.py
|
Python
|
gpl-3.0
| 3,615
|
[
"VisIt"
] |
20ca58443771c9a246f19b5743239729954ec57169b467732303a03eb41863ee
|
#!/usr/bin/env python
import os
import sys
import time
import logging
import numpy as np
import pandas as pd
from datetime import datetime
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
from moloi.config_processing import read_model_config
from moloi.evaluation import evaluate, make_scoring
from moloi.splits.cv import create_cv
from moloi.data_processing import get_data, clean_data
# ---------------------------------------------------------------------------
# Random-forest screening pipeline: load a molecular dataset described by a
# data config, scale it, optionally select features with RFE, tune a
# RandomForestClassifier via RandomizedSearchCV and report the evaluation.
# All settings are hard-coded module-level constants below.
# ---------------------------------------------------------------------------
root_address = os.path.dirname(os.path.realpath(__file__)).replace("/moloi/bin", "")
# Per-run output folder named after the start timestamp.
output = root_address + "/tmp/" + str(datetime.now()) + '/'
data_config = "/data/data_configs/bace.ini"
model_config = "/data/model_configs/configs.ini"
section = 'RF'
descriptors = ['rdkit', 'mordred', 'morgan', 'maccs']
n_bits = 2048
n_cv = 5
n_iter = 5
n_jobs = -1
patience = 100
metric = 'roc_auc'
split_type = 'scaffold'
split_s = 0.1
targets = [0]
random_state = 1337
verbose = 10
time_start = datetime.now()
# create experiment folder before starting log
if not os.path.exists(output):
    os.makedirs(output)
# NOTE(review): the "*" makes this path never exist, so the results
# folder is (re)created on every run -- presumably intentional here, but
# os.path.exists does not expand globs; confirm.
if not os.path.exists(output+"results/*"):
    os.makedirs(output+"results/")
# create logger object, it is passed to all functions in the program
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s [%(name)s] %(levelname)s: %(message)s')
# writing log to file
handler = logging.FileHandler(output + 'log')
handler.setFormatter(formatter)
logger.addHandler(handler)
# writing log to terminal (for stdout `stream=sys.stdout`)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
# load data and configs
epochs, rparams, gparams = read_model_config(root_address+model_config, section)
x_train, x_test, x_val, y_val, y_train, y_test, input_shape, output_shape, smiles = get_data(logger, root_address+data_config, n_bits,
                                                                                             targets, random_state, split_type, split_s,
                                                                                             verbose, descriptors, n_jobs)
x_train = clean_data(x_train)
x_test = clean_data(x_test)
x_val = clean_data(x_val)
# Scale all splits with statistics fitted on the training split only.
transformer_X = MinMaxScaler().fit(x_train)
x_train = transformer_X.transform(x_train)
x_test = transformer_X.transform(x_test)
x_val = transformer_X.transform(x_val)
"""
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
test = SelectKBest(score_func=chi2, k=3000)
fit = test.fit(x_train, y_train)
x_train = fit.transform(x_train)
x_test = fit.transform(x_test)
x_val = fit.transform(x_val)
"""
# Sanity checks on the label distribution before fitting.
if len(np.unique(y_train)) == 1 or len(np.unique(y_test)) == 1 or len(np.unique(y_val)) == 1:
    logger.error("Only one class in data")
    sys.exit(0)
# NOTE(review): `and` binds tighter than `or`, so the "roc_auc" check
# only applies to the y_val condition; the intent was probably
# (any split multiclass) and "roc_auc" in metric -- confirm.
if len(np.unique(y_train)) > 2 or len(np.unique(y_test)) > 2 or len(np.unique(y_val)) > 2 and "roc_auc" in metric:
    logger.error("Multiclass data: can not use ROC AUC metric")
    sys.exit(0)
scoring = make_scoring(metric)
n_cv = create_cv(smiles, split_type, n_cv, random_state)
sklearn_params = {'param_distributions': gparams,
                  'n_iter': n_iter,
                  'n_jobs': n_jobs,
                  'cv': n_cv,
                  'verbose': verbose,
                  'scoring': scoring,
                  'return_train_score': True,
                  'random_state': random_state}
# NOTE(review): this first search is fitted but its result is discarded
# below -- the model is rebuilt after RFE feature selection.
model = RandomizedSearchCV(RandomForestClassifier(**rparams), **sklearn_params)
model.fit(x_train, np.ravel(y_train))
# Recursive feature elimination down to 1000 features, applied to all splits.
from sklearn.feature_selection import RFE
model = RandomForestClassifier()
rfe = RFE(model, 1000)
fit = rfe.fit(x_train, np.ravel(y_train))
x_train = fit.transform(x_train)
x_test = fit.transform(x_test)
x_val = fit.transform(x_val)
# Final hyperparameter search on the reduced feature set.
model = RandomizedSearchCV(RandomForestClassifier(**rparams), **sklearn_params)
model.fit(x_train, np.ravel(y_train))
rparams = model.best_params_
grid = pd.DataFrame(model.cv_results_).sort_values(by='mean_test_score', ascending=False)
grid.to_csv(output + "gridsearch.csv")
model = model.best_estimator_
accuracy_test, accuracy_train, rec, auc, auc_val, f1, path = evaluate(logger, False, random_state, output, model, x_train, x_test, x_val, y_val, y_train, y_test,
                                                                      time_start, rparams, False, section, n_jobs, descriptors, grid)
print("accuracy_test:", accuracy_test)
print("accuracy_train:", accuracy_train)
print("rec:", str(rec))
print("auc:", str(auc))
print("auc_val:", str(auc_val))
print("f1", f1)
print("Report address:", path)
print("Done")
|
DentonJC/virtual_screening
|
moloi/bin/rf.py
|
Python
|
gpl-3.0
| 4,657
|
[
"RDKit"
] |
fa6fd939c75c3c15f40233962559c66b251a2160e1fc2f8c90013f41edecf481
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ray
import pytest
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.sql import SparkSession
@pytest.fixture(autouse=True, scope='package')
def orca_context_fixture():
    """Package-scoped, auto-used fixture: starts an OrcaContext, registers
    the `to_array` and `flatten` vector UDFs on a SparkSession, and tears
    the context down once the package's tests have finished."""
    from zoo.orca import init_orca_context, stop_orca_context

    sc = init_orca_context(cores=8)

    # Convert a single Spark ML Vector into a plain list of doubles.
    def vector_to_list(v):
        return v.toArray().tolist()

    # Concatenate a sequence of Spark ML Vectors into one flat list.
    def flatten_vectors(vectors):
        flat = []
        for vec in vectors:
            flat.extend(vec.toArray().tolist())
        return flat

    session = SparkSession(sc)
    session.udf.register("to_array", vector_to_list, ArrayType(DoubleType()))
    session.udf.register("flatten", flatten_vectors, ArrayType(DoubleType()))

    yield

    stop_orca_context()
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/learn/ray/pytorch/conftest.py
|
Python
|
apache-2.0
| 1,280
|
[
"ORCA"
] |
682af61fc1f8d57771626dd5b23107625a4e330a7d2dacb47d71112a41b3bfa8
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
RPGOne/Skynet
|
scikit-learn-0.18.1/examples/linear_model/plot_ard.py
|
Python
|
bsd-3-clause
| 2,828
|
[
"Gaussian"
] |
411f31846b536d3610cc9c215f91b850a6ed59f5d3cefc3bdc22421775f2e90e
|
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# TODO: Use overlap (rather than simple pointer location) to determine
# drag and drop.
import renpy.display
from renpy.display.render import render, Render, redraw
from renpy.display.core import absolute
from renpy.display.behavior import map_event, run, run_unhovered
import pygame_sdl2 as pygame
def default_drag_group():
    """
    Returns the default drag group stored on the current scene lists,
    creating and caching a new DragGroup the first time it's requested.
    """

    scene_lists = renpy.game.context().scene_lists

    group = scene_lists.drag_group

    if group is None:
        group = DragGroup()
        scene_lists.drag_group = group

    return group
def default_drag_joined(drag):
    """By default a drag is joined only with itself, at zero offset."""
    joined = [(drag, 0, 0)]
    return joined
class Drag(renpy.display.core.Displayable, renpy.python.RevertableObject):
    """
    :doc: drag_drop class
    :args: (d=None, drag_name=None, draggable=True, droppable=True, drag_raise=True, dragged=None, dropped=None, drag_handle=(0.0, 0.0, 1.0, 1.0), drag_joined=..., clicked=None, hovered=None, unhovered=None, **properties)
    A displayable that represents an object that can be dragged around
    its enclosing area. A Drag can also represent an area that
    other Drags can be dropped on.
    A Drag can be moved around inside is parent. Generally, its parent
    should be either a :func:`Fixed` or :class:`DragGroup`.
    A Drag has one child. The child's state reflects the status
    of the drag and drop operation:
    * ``selected_hover`` - when it is being dragged.
    * ``selected_idle`` - when it can be dropped on.
    * ``hover`` - when the draggable will be dragged when the mouse is
      clicked.
    * ``idle`` - otherwise.
    The drag handle is a rectangle inside the child. The mouse must be over
    a non-transparent pixel inside the drag handle for dragging or clicking
    to occur.
    A newly-created draggable is added to the default DragGroup. A draggable
    can only be in a single DragGroup - if it's added to a second group,
    it's removed from the first.
    When a Drag is first rendered, if it's position cannot be determined
    from the DragGroup it is in, the position of its upper-left corner
    is computed using the standard layout algorithm. Once that position
    `d`
        If present, the child of this Drag. Drags use the child style
        in preference to this, if it's not None.
    `drag_name`
        If not None, the name of this draggable. This is available
        as the `name` property of draggable objects. If a Drag
        with the same name is or was in the DragGroup, the starting
        position of this Drag is taken from that Draggable.
    `draggable`
        If true, the Drag can be dragged around the screen with
        the mouse.
    `droppable`
        If true, other Drags can be dropped on this Drag.
    `drag_raise`
        If true, this Drag is raised to the top when it is dragged. If
        it is joined to other Drags, all joined drags are raised.
    `dragged`
        A callback (or list of callbacks) that is called when the Drag
        has been dragged. It is called with two arguments. The first is
        a list of Drags that are being dragged. The second is either
        a Drag that is being dropped onto, or None of a drop did not
        occur. If the callback returns a value other than None, that
        value is returned as the result of the interaction.
    `dropped`
        A callback (or list of callbacks) that is called when this Drag
        is dropped onto. It is called with two arguments. The first
        is the Drag being dropped onto. The second is a list of Drags that
        are being dragged. If the callback returns a value other than None,
        that value is returned as the result of the interaction.
        When a dragged and dropped callback are triggered for the same
        event, the dropped callback is only called if dragged returns
        None.
    `clicked`
        A callback this is called, with no arguments, when the Drag is
        clicked without being moved. A droppable can also be focused
        and clicked. If the callback returns a value other than None,
        that value is returned as the result of the interaction.
    `drag_handle`
        A (x, y, width, height) tuple, giving the position of the drag
        handle within the child. In this tuple, integers are considered
        to be a literal number of pixels, while floats are relative to
        the size of the child.
    `drag_joined`
        This is called with the current Drag as an argument. It's
        expected to return a list of [ (drag, x, y) ] tuples, giving
        the draggables to drag as a unit. `x` and `y` are the offsets
        of the drags relative to each other, they are not relative
        to the corner of this drag.
    `drag_offscreen`
        If true, this draggable can be moved offscreen. This can be
        dangerous to use with drag_joined or drags that can change
        size, as the drags can leave the screen entirely, with no
        way to get them back on the screen.
    Except for `d`, all of the parameters are available as fields (with
    the same name) on the Drag object. In addition, after the drag has
    been rendered, the following fields become available:
    `x`, `y`
         The position of the Drag relative to its parent, in pixels.
    `w`, `h`
         The width and height of the Drag's child, in pixels.
    """

    # Class-level defaults, overridden per-instance as needed.
    focusable = True
    drag_group = None
    old_position = None
    drag_offscreen = False

    def __init__(self,
                 d=None,
                 drag_name=None,
                 draggable=True,
                 droppable=True,
                 drag_raise=True,
                 dragged=None,
                 dropped=None,
                 drag_handle=(0.0, 0.0, 1.0, 1.0),
                 drag_joined=default_drag_joined,
                 clicked=None,
                 hovered=None,
                 unhovered=None,
                 replaces=None,
                 drag_offscreen=False,
                 style="drag",
                 **properties):

        super(Drag, self).__init__(style=style, **properties)

        self.drag_name = drag_name
        self.draggable = draggable
        self.droppable = droppable
        self.drag_raise = drag_raise
        self.dragged = dragged
        self.dropped = dropped
        self.drag_handle = drag_handle
        self.drag_joined = drag_joined
        self.clicked = clicked
        self.hovered = hovered
        self.unhovered = unhovered
        self.drag_offscreen = drag_offscreen

        # We're focusable if we can be dragged.
        self.focusable = draggable

        self.child = None

        # Add us to a drag group on creation.
        if drag_name:
            self.drag_group = default_drag_group()

        # The current x and y coordinates of this displayable.
        self.x = None
        self.y = None

        # The width and height of the child.
        self.w = None
        self.h = None

        self.old_position = None

        # The width and height of our parent.
        self.parent_width = None
        self.parent_height = None

        # The target x and y coordinates of this displayable. (The
        # coordinates that we're snapping to.)
        self.target_x = None
        self.target_y = None

        # The offset from the location of the mouse to the "grab point",
        # which is where the things that are being moved are offset from.
        self.grab_x = None
        self.grab_y = None

        # x and y from the last time we rendered.
        self.last_x = None
        self.last_y = None

        # The abs_x and abs_y from when we started the grab.
        self.start_x = 0
        self.start_y = 0

        # The last time we were shown, using the animation timebases.
        self.at = 0

        # The (animation timebase) time at which we should reach
        # the target coordinates for the currently executing snap animation.
        self.target_at = 0

        # The duration of a new snap animation to execute starting at
        # the next render() call
        self.target_at_delay = 0

        # The displayable we were last dropping on.
        self.last_drop = None

        # Did we move over the course of this drag?
        self.drag_moved = False

        # When replacing an older Drag (e.g. on screen re-show), carry over
        # all of its positional and grab state so the interaction continues
        # seamlessly.
        if replaces is not None:
            self.x = replaces.x
            self.y = replaces.y
            self.at = replaces.at
            self.target_x = replaces.target_x
            self.target_y = replaces.target_y
            self.target_at = replaces.target_at
            self.target_at_delay = replaces.target_at_delay
            self.grab_x = replaces.grab_x
            self.grab_y = replaces.grab_y
            self.last_x = replaces.last_x
            self.last_y = replaces.last_y
            self.old_position = replaces.old_position
            self.drag_moved = replaces.drag_moved
            self.last_drop = replaces.last_drop

        if d is not None:
            self.add(d)

    def snap(self, x, y, delay=0):
        """
        :doc: drag_drop method
        Changes the position of the drag. If the drag is not showing,
        then the position change is instantaneous. Otherwise, the
        position change takes `delay` seconds, and is animated as a
        linear move.
        """

        # Float coordinates are fractions of the parent's size.
        if type(x) is float:
            x = int(x * self.parent_width)

        if type(y) is float:
            y = int(y * self.parent_height)

        self.target_x = x
        self.target_y = y

        if self.x is not None:
            # Already placed: animate over `delay` seconds at next render.
            self.target_at_delay = delay
        else:
            # Not yet shown: jump straight to the target.
            self.target_at = self.at
            self.x = x
            self.y = y

        # Persist the new position so it survives re-creation of the Drag.
        if self.drag_group is not None:
            self.drag_group.positions[self.drag_name] = (x, y, self.old_position)

        redraw(self, 0)

    def set_style_prefix(self, prefix, root):
        # Propagate the style prefix to the child so its state (idle/hover/
        # selected_*) mirrors the drag-and-drop status.
        if root:
            super(Drag, self).set_style_prefix(prefix, root)

        if self.child is not None:
            self.child.set_style_prefix(prefix, False)

    def add(self, d):
        # Set the single child of this Drag; a Drag may have at most one.
        if self.child is not None:
            raise Exception("Drag expects either zero or one children.")

        self.child = renpy.easy.displayable(d)

    def _clear(self):
        # Drop the child and schedule a redraw.
        self.child = None
        renpy.display.render.redraw(self, 0)

    def set_child(self, d):
        """
        :doc: drag_drop method
        Changes the child of this drag to `d`.
        """

        d.per_interact()
        self.child = renpy.easy.displayable(d)
        renpy.display.render.invalidate(self)

    def top(self):
        """
        :doc: drag_drop method
        Raises this displayable to the top of its drag_group.
        """

        if self.drag_group is not None:
            self.drag_group.raise_children([ self ])

    def visit(self):
        # Our only descendant is the child displayable.
        return [ self.child ]

    def focus(self, default=False):
        # Run the hovered callback when focus is gained by the user
        # (not when it's restored by default).
        super(Drag, self).focus(default)

        rv = None

        if not default:
            rv = run(self.hovered)

        return rv

    def unfocus(self, default=False):
        # Run the unhovered callback when focus is lost.
        super(Drag, self).unfocus(default)

        if not default:
            run_unhovered(self.hovered)
            run(self.unhovered)

    def render(self, width, height, st, at):
        # Renders the child, resolves our position (from the drag group,
        # a snap animation in progress, or the normal layout algorithm),
        # and registers the drag handle as a focusable region.

        child = self.style.child
        if child is None:
            child = self.child

        self.parent_width = width
        self.parent_height = height

        cr = render(child, width, height, st, at)
        cw, ch = cr.get_size()

        rv = Render(cw, ch)
        rv.blit(cr, (0, 0))

        self.w = cw
        self.h = ch

        # The style-derived placement; used to detect when the style's
        # position properties have changed since we were last placed.
        position = (self.style.xpos, self.style.ypos, self.style.xanchor, self.style.yanchor, self.style.xoffset, self.style.yoffset)

        # If we don't have a position, then look for it in a drag group.
        if (self.x is None) and (self.drag_group is not None) and (self.drag_name is not None):
            if self.drag_name in self.drag_group.positions:
                dgp = self.drag_group.positions[self.drag_name]
                # Older saved positions may be 2-tuples without old_position.
                if len(dgp) == 3:
                    self.x, self.y, self.old_position = dgp
                else:
                    self.x, self.y = dgp
                    self.old_position = position

        if self.old_position != position:
            place = True
        elif self.x is None:
            place = True
        else:
            place = False

        # If we don't have a position, run the placement code and use
        # that to compute our placement.
        if place:
            # This is required to get get_placement to work properly.
            self.x = None

            place_x, place_y = self.place(None, 0, 0, width, height, rv)

            self.x = int(place_x)
            self.y = int(place_y)

            self.target_x = None

            self.old_position = position

        if self.target_x is None:
            self.target_x = self.x
            self.target_y = self.y
            self.target_at = at

        # Determine if we need to do the snap animation.
        if self.target_at_delay:
            # Snap starts now
            self.target_at = at + self.target_at_delay
            self.target_at_delay = 0
            redraw(self, 0)
        elif at >= self.target_at:
            # Snap complete
            self.x = self.target_x
            self.y = self.target_y
        else:
            # Snap in progress
            done = (at - self.at) / (self.target_at - self.at)
            self.x = absolute(self.x + done * (self.target_x - self.x))
            self.y = absolute(self.y + done * (self.target_y - self.y))
            redraw(self, 0)

        if self.draggable or self.clicked is not None:

            # Resolve the drag handle: floats are fractions of the child
            # size, ints are literal pixels.
            fx, fy, fw, fh = self.drag_handle

            if isinstance(fx, float):
                fx = int(fx * cw)

            if isinstance(fy, float):
                fy = int(fy * ch)

            if isinstance(fw, float):
                fw = int(fw * cw)

            if isinstance(fh, float):
                fh = int(fh * ch)

            mask = self.style.focus_mask

            if mask is True:
                mask = cr.subsurface((fx, fy, fw, fh))
            elif mask is not None:
                try:
                    mask = renpy.display.render.render(mask, fw, fh, st, at)
                except:
                    if callable(mask):
                        mask = mask
                    else:
                        raise Exception("Focus_mask must be None, True, a displayable, or a callable.")

            if mask is not None:
                fmx = 0
                fmy = 0
            else:
                fmx = None
                fmy = None

            rv.add_focus(self, None, fx, fy, fw, fh, fmx, fmy, mask)

        self.last_x = self.x
        self.last_y = self.y
        self.at = at

        return rv

    def event(self, ev, x, y, st):
        # Drives the drag-and-drop state machine: grab on drag_activate,
        # move joined drags on mouse motion, and on drag_deactivate either
        # finish the drop (running dragged/dropped callbacks) or treat the
        # interaction as a click.

        if not self.is_focused():
            return self.child.event(ev, x, y, st)

        # Mouse, in parent-relative coordinates.
        par_x = int(self.last_x + x)
        par_y = int(self.last_y + y)

        grabbed = (renpy.display.focus.get_grab() is self)

        if grabbed:
            joined_offsets = self.drag_joined(self)
            joined = [ i[0] for i in joined_offsets ]

        elif self.draggable and map_event(ev, "drag_activate"):

            joined_offsets = self.drag_joined(self)
            joined = [ i[0] for i in joined_offsets ]

            if not joined:
                raise renpy.display.core.IgnoreEvent()

            renpy.display.focus.set_grab(self)

            self.grab_x = x
            self.grab_y = y

            # If we're not the only thing we're joined with, we
            # might need to adjust our grab point.
            for i, xo, yo in joined_offsets:
                if i is self:
                    self.grab_x += xo
                    self.grab_y += yo
                    break

            self.drag_moved = False
            self.start_x = par_x
            self.start_y = par_y

            grabbed = True

        # Handle clicking on droppables.
        if not grabbed:
            if self.clicked is not None and map_event(ev, "drag_deactivate"):
                rv = run(self.clicked)
                if rv is not None:
                    return rv

                raise renpy.display.core.IgnoreEvent()

            return self.child.event(ev, x, y, st)

        # Handle moves by moving things relative to the grab point.
        if ev.type in (pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN):
            handled = True

            # The drag becomes a "move" the first time the pointer leaves
            # the position where the grab started.
            if not self.drag_moved and (self.start_x != par_x or self.start_y != par_y):
                self.drag_moved = True

                # We may not be in the drag_joined group.
                self.set_style_prefix("idle_", True)

                # Set the style.
                for i in joined:
                    i.set_style_prefix("selected_hover_", True)

                # Raise the joined items.
                if self.drag_raise and self.drag_group is not None:
                    self.drag_group.raise_children(joined)

            if self.drag_moved:
                for i, xo, yo in joined_offsets:

                    new_x = int(par_x - self.grab_x + xo)
                    new_y = int(par_y - self.grab_y + yo)

                    # Clamp to the parent unless offscreen dragging is on.
                    if not self.drag_offscreen:
                        new_x = max(new_x, 0)
                        new_x = min(new_x, int(i.parent_width - i.w))
                        new_y = max(new_y, 0)
                        new_y = min(new_y, int(i.parent_height - i.h))

                    if i.drag_group is not None and i.drag_name is not None:
                        i.drag_group.positions[i.drag_name] = (new_x, new_y, self.old_position)

                    i.x = new_x
                    i.y = new_y
                    i.target_x = new_x
                    i.target_y = new_y
                    i.target_at = self.at
                    redraw(i, 0)

        else:
            handled = False

        # Find the best droppable under the joined drags, and update
        # its style to reflect whether a drop would land on it.
        if (self.drag_group is not None) and self.drag_moved:
            drop = self.drag_group.get_best_drop(joined)
        else:
            drop = None

        if drop is not self.last_drop:

            if self.last_drop is not None:
                self.last_drop.set_style_prefix("idle_", True)

            if drop is not None:
                drop.set_style_prefix("selected_idle_", True)

            self.last_drop = drop

        if map_event(ev, 'drag_deactivate'):

            renpy.display.focus.set_grab(None)

            if drop is not None:
                drop.set_style_prefix("idle_", True)

            for i in joined:
                i.set_style_prefix("idle_", True)

            self.set_style_prefix("hover_", True)

            self.grab_x = None
            self.grab_y = None
            self.last_drop = None

            if self.drag_moved:

                # Call the drag callback.
                drag = joined[0]
                if drag.dragged is not None:
                    rv = run(drag.dragged, joined, drop)
                    if rv is not None:
                        return rv

                # Call the drop callback.
                if drop is not None and drop.dropped is not None:
                    rv = run(drop.dropped, drop, joined)
                    if rv is not None:
                        return rv

            else:

                # Call the clicked callback.
                if self.clicked:
                    rv = run(self.clicked)
                    if rv is not None:
                        return rv

        if handled:
            raise renpy.display.core.IgnoreEvent()

    def get_placement(self):
        # Once positioned, the drag reports its own coordinates (with
        # subpixel placement enabled); otherwise fall back to the style.
        if self.x is not None:
            return self.x, self.y, 0, 0, 0, 0, True
        else:
            return super(Drag, self).get_placement()

    def per_interact(self):
        # Reset to the idle state at the start of each interaction.
        self.set_style_prefix("idle_", True)
        super(Drag, self).per_interact()
class DragGroup(renpy.display.layout.MultiBox):
    """
    :doc: drag_drop class
    Represents a group of Drags. A Drag is limited to the boundary of
    its DragGroup. Dropping only works between Drags that are in the
    same DragGroup. Drags may only be raised when they are inside a
    DragGroup.
    A DragGroup is laid out like a :func:`Fixed`.
    All positional parameters to the DragGroup constructor should be
    Drags, that are added to the DragGroup.
    """

    # Rollback-aware list type used when reordering children.
    _list_type = renpy.python.RevertableList

    def __unicode__(self):
        return "DragGroup"

    def __init__(self, *children, **properties):
        properties.setdefault("style", "fixed")
        properties.setdefault("layout", "fixed")

        replaces = properties.pop("replaces", None)

        super(DragGroup, self).__init__(**properties)

        # When replacing an older DragGroup, carry over the saved drag
        # positions and sensitivity; otherwise start fresh.
        if replaces is not None:
            self.positions = renpy.python.RevertableDict(replaces.positions)
            self.sensitive = replaces.sensitive
        else:
            self.positions = renpy.python.RevertableDict()
            self.sensitive = True

        for i in children:
            self.add(i)

    def add(self, child):
        """
        :doc: drag_drop method
        Adds `child`, which must be a Drag, to this DragGroup.
        """

        if not isinstance(child, Drag):
            raise Exception("Only drags can be added to a drag group.")

        child.drag_group = self

        super(DragGroup, self).add(child)

    def remove(self, child):
        """
        :doc: drag_drop method
        Removes `child` from this DragGroup.
        """

        if not isinstance(child, Drag):
            raise Exception("Only drags can be removed from a drag group.")

        # Invalidate the child's position so it is re-placed if re-added.
        child.x = None

        super(DragGroup, self).remove(child)

    def event(self, ev, x, y, st):
        # Swallow all events while the group is insensitive.
        if not self.sensitive:
            return None

        return super(DragGroup, self).event(ev, x, y, st)

    def raise_children(self, l):
        """
        Raises the children in `l` to the top of this drag_group, using the
        order given in l for those children.
        """

        s = set(l)

        # Remember each raised child's offset so children and offsets
        # stay paired after reordering.
        offset_map = { }

        children = [ ]
        offsets = [ ]

        # Keep non-raised children (and their offsets) in original order.
        for i, c in enumerate(self.children):
            if i < len(self.offsets):
                o = self.offsets[i]
            else:
                o = (0, 0)

            if c not in s:
                children.append(c)
                offsets.append(o)
            else:
                offset_map[c] = o

        # Append the raised children last, so they draw on top.
        for c in l:
            if c in offset_map:
                children.append(c)
                offsets.append(offset_map[c])

        self.children = self._list_type(children)
        self.offsets = self._list_type(offsets)

    def get_best_drop(self, joined):
        """
        Returns the droppable that the members of joined overlap the most.
        """

        max_overlap = 0
        # rv stays 0 (falsy) until a positive overlap is found; only then
        # is it returned.
        rv = 0

        joined_set = set(joined)

        for d in joined:

            r1 = (d.x, d.y, d.w, d.h)

            for c in self.children:

                if c in joined_set:
                    continue

                if not c.droppable:
                    continue

                # Skip droppables that have not been placed yet.
                if c.x is None:
                    continue

                r2 = (c.x, c.y, c.w, c.h)

                overlap = rect_overlap_area(r1, r2)

                if overlap >= max_overlap:
                    rv = c
                    max_overlap = overlap

        if max_overlap <= 0:
            return None
        else:
            return rv

    def get_children(self):
        """
        Returns a list of Drags that are the children of
        this DragGroup.
        """

        return renpy.python.RevertableList(self.children)

    def get_child_by_name(self, name):
        """
        :doc: drag_drop method
        Returns the first child of this DragGroup that has a drag_name
        of name.
        """

        for i in self.children:
            if i.drag_name == name:
                return i

        return None
def rect_overlap_area(r1, r2):
    """
    Returns the number of pixels by which rectangles r1 and r2 overlap.
    Each rectangle is an (x, y, width, height) tuple; disjoint
    rectangles yield 0.
    """

    ax, ay, aw, ah = r1
    bx, by, bw, bh = r2

    # Edges of the intersection rectangle.
    left = max(ax, bx)
    right = min(ax + aw, bx + bw)
    top = max(ay, by)
    bottom = min(ay + ah, by + bh)

    # No intersection at all.
    if right < left or bottom < top:
        return 0

    return (right - left) * (bottom - top)
|
kfcpaladin/sze-the-game
|
renpy/display/dragdrop.py
|
Python
|
mit
| 25,608
|
[
"VisIt"
] |
8ccb5c2a94f60bd1f9a0b7492c3437ccf272340c1fdbf49e7f00d8b54ba3ea12
|
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: probability.py,v 1.1.1.2 2004/09/29 21:57:49 adastra Exp $
"""
Classes for representing and processing probabilistic information.
The L{FreqDist} class is used to encode X{frequency distributions},
which count the number of times that each outcome of an experiment
occurs.
The L{ProbDistI} class defines a standard interface for X{probability
distributions}, which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- X{derived probability distributions} are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- X{analytic probability distributions} are created directly from
parameters (such as variance).
The L{ConditionalFreqDist} class and L{ConditionalProbDistI} interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the C{ConditionalProbDistI} interface is
L{ConditionalProbDist}, a derived distribution.
The L{ProbabilisticMixIn} class is a mix-in class that can be used to
associate probabilities with data classes (such as C{Token} or
C{Tree}).
@group Frequency Distributions: FreqDist
@group Derived Probability Distributions: ProbDistI, MLEProbDist,
LidstoneProbDist, LaplaceProbDist, ELEProbDist, HeldoutProbDist,
CrossValidationProbDist
@group Analytic Probability Distributions: UniformProbDist
@group Conditional Distributions: ConditionalFreqDist,
ConditionalProbDistI, ConditionalProbDist
@group Probabilistic Mix-In: ProbabilisticMixIn
@sort: FreqDist, ProbDistI, MLEProbDist, LidstoneProbDist, LaplaceProbDist,
ELEProbDist, HeldoutProbDist, CrossValidationProbDist, UniformProbDist,
ConditionalFreqDist, ConditionalProbDistI, ConditionalProbDist
@todo: Better handling of log probabilities.
"""
from nltk.chktype import chktype as _chktype
from sets import Set
from nltk.util import sum_logs
import types, math, numpy
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
class FreqDist:
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occured. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occured as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> fdist = FreqDist()
>>> for token in text_token['SUBTOKENS']:
... fdist.inc(token['TEXT'])
"""
def __init__(self):
"""
Construct a new empty, C{FreqDist}. In particular, the count
for every sample is zero.
"""
self._count = {}
self._N = 0
self._Nr_cache = None
self._max_cache = None
def inc(self, sample, count=1):
"""
Increment this C{FreqDist}'s count for the given
sample.
@param sample: The sample whose count should be incremented.
@type sample: any
@param count: The amount to increment the sample's count by.
@type count: C{int}
@rtype: None
@raise NotImplementedError: If C{sample} is not a
supported sample type.
"""
assert _chktype(2, count, types.IntType)
if count == 0: return
self._N += count
self._count[sample] = self._count.get(sample,0) + count
# Invalidate the Nr cache and max cache.
self._Nr_cache = None
self._max_cache = None
def N(self):
"""
@return: The total number of sample outcomes that have been
recorded by this C{FreqDist}. For the number of unique
sample values (or bins) with counts greater than zero, use
C{FreqDist.B()}.
@rtype: C{int}
"""
return self._N
def B(self):
"""
@return: The total number of sample values (or X{bins}) that
have counts greater than zero. For the total
number of sample outcomes recorded, use C{FreqDist.N()}.
@rtype: C{int}
"""
return len(self._count)
def samples(self):
"""
@return: A list of all samples that have been recorded as
outcomes by this frequency distribution. Use C{count()}
to determine the count for each sample.
@rtype: C{list}
"""
return self._count.keys()
def Nr(self, r, bins=None):
"""
@return: The number of samples with count r.
@rtype: C{int}
@type r: C{int}
@param r: A sample count.
@type bins: C{int}
@param bins: The number of possible sample outcomes. C{bins}
is used to calculate Nr(0). In particular, Nr(0) is
C{bins-self.B()}. If C{bins} is not specified, it
defaults to C{self.B()} (so Nr(0) will be 0).
"""
assert _chktype(1, r, types.IntType)
assert _chktype(2, bins, types.IntType, types.NoneType)
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
def _cache_Nr_values(self):
Nr = [0]
for sample in self.samples():
c = self._count.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def count(self, sample):
"""
Return the count of a given sample. The count of a sample is
defined as the number of times that sample outcome was
recorded by this C{FreqDist}. Counts are non-negative
integers.
@return: The count of a given sample.
@rtype: C{int}
@param sample: the sample whose count
should be returned.
@type sample: any.
"""
return self._count.get(sample, 0)
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this C{FreqDist}. The count of a sample is defined as the
number of times that sample outcome was recorded by this
C{FreqDist}. Frequencies are always real numbers in the range
[0, 1].
@return: The frequency of a given sample.
@rtype: float
@param sample: the sample whose frequency
should be returned.
@type sample: any
"""
if self._N is 0: return 0
return float(self._count.get(sample, 0)) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occured in this
frequency distribution, return C{None}.
@return: The sample with the maximum number of outcomes in this
frequency distribution.
@rtype: any or C{None}
"""
if self._max_cache is None:
best_sample = None
best_count = -1
for sample in self._count.keys():
if self._count[sample] > best_count:
best_sample = sample
best_count = self._count[sample]
self._max_cache = best_sample
return self._max_cache
def sorted_samples(self):
"""
Return the samples sorted in decreasing order of frequency. Instances
with the same count will be arbitrarily ordered. Instances with a
count of zero will be omitted. This method is C{O(N^2)}, where C{N} is
the number of samples, but will complete in a shorter time on average.
@return: The set of samples in sorted order.
@rtype: sequence of any
"""
items = [(-count,sample) for (sample,count) in self._count.items()]
items.sort()
return [sample for (neg_count,sample) in items]
    def __repr__(self):
        """
        @return: A concise string representation of this C{FreqDist},
            showing only the total number of recorded outcomes.
        @rtype: string
        """
        return '<FreqDist with %d samples>' % self.N()
    def __str__(self):
        """
        @return: A verbose string representation of this C{FreqDist},
            listing each sample and its count, most frequent first.
        @rtype: string
        """
        # sorted_samples() supplies the most-frequent-first ordering.
        samples = self.sorted_samples()
        items = ['%r: %r' % (s, self._count[s]) for s in samples]
        return '<FreqDist: %s>' % ', '.join(items)
def __contains__(self, sample):
"""
@return: True if the given sample occurs one or more times in
this frequency distribution.
@rtype: C{boolean}
@param sample: The sample to search for.
@type sample: any
"""
return self._count.has_key(sample)
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI:
    """
    A probability distribution for the outcomes of an experiment.  A
    probability distribution specifies how likely it is that an
    experiment will have any given outcome.  For example, a
    probability distribution could be used to predict the probability
    that a token in a document will have a given type.  Formally, a
    probability distribution can be defined as a function mapping from
    samples to nonnegative real numbers, such that the sum of every
    number in the function's range is 1.0.  C{ProbDist}s are often
    used to model the probability distribution of the experiment used
    to generate a frequency distribution.
    """
    def __init__(self):
        # Note: uses the callable raise form (not ``raise E, msg``) so
        # this module stays parseable by Python 3 tools; the runtime
        # behavior is unchanged.
        if self.__class__ == ProbDistI:
            raise AssertionError("Interfaces can't be instantiated")
    def prob(self, sample):
        """
        @return: the probability for a given sample.  Probabilities
            are always real numbers in the range [0, 1].
        @rtype: float
        @param sample: The sample whose probability
            should be returned.
        @type sample: any
        """
        raise AssertionError()
    def logprob(self, sample):
        """
        @return: the natural logarithm of the probability for a given
            sample.  Log probabilities range from negative infinity to
            zero.
        @rtype: float
        @param sample: The sample whose probability
            should be returned.
        @type sample: any
        """
        # Default definition, in terms of prob()
        p = self.prob(sample)
        if p == 0:
            # Use some approximation to negative infinity.  What this
            # does depends on your system's float implementation.
            return -1e1000
        else:
            return math.log(p)
    def max(self):
        """
        @return: the sample with the greatest probability.  If two or
            more samples have the same probability, return one of them;
            which sample is returned is undefined.
        @rtype: any
        """
        raise AssertionError()
    def samples(self):
        """
        @return: A list of all samples that have nonzero
            probabilities.  Use C{prob} to find the probability of
            each sample.
        @rtype: C{list}
        """
        raise AssertionError()
class UniformProbDist(ProbDistI):
    """
    A probability distribution that assigns equal probability to each
    sample in a given set; and a zero probability to all other
    samples.
    """
    def __init__(self, samples):
        """
        Construct a new uniform probability distribution, that assigns
        equal probability to each sample in C{samples}.
        @param samples: The samples that should be given uniform
            probability.
        @type samples: C{list}
        @raise ValueError: If C{samples} is empty.
        """
        assert _chktype(1, samples, [], ())
        if len(samples) == 0:
            raise ValueError('A Uniform probability distribution must '+
                             'have at least one sample.')
        # Duplicates are collapsed by the Set, so each *distinct*
        # sample receives probability 1/B, where B is the number of
        # distinct samples.
        self._sampleset = Set(samples)
        self._prob = 1.0/len(self._sampleset)
        self._samples = list(self._sampleset)
    def prob(self, sample):
        # Uniform mass for members of the set; zero for anything else.
        if sample in self._sampleset: return self._prob
        else: return 0
    def max(self): return self._samples[0]  # any sample is maximal; return the first
    def samples(self): return self._samples
    def __repr__(self):
        return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
    """
    A probability distribution whose probabilities are directly
    specified by a given dictionary.  The given dictionary maps
    samples to probabilities.
    """
    def __init__(self, prob_dict=None, log=False, normalize=False):
        """
        Construct a new probability distribution from the given
        dictionary, which maps values to probabilities (or to log
        probabilities, if C{log} is true).  If C{normalize} is
        true, then the probability values are scaled by a constant
        factor such that they sum to 1.  If C{prob_dict} is not
        given, an empty dictionary is used.
        """
        # Honor the documented default: the original unconditionally
        # called prob_dict.copy(), raising AttributeError for None.
        if prob_dict is None:
            prob_dict = {}
        assert _chktype(1, prob_dict, {})
        self._prob_dict = prob_dict.copy()
        self._log = log
        # Normalize the distribution, if requested.
        if normalize:
            if log:
                value_sum = sum_logs(self._prob_dict.values())
                if value_sum <= -1e1000:
                    # Every sample has log probability -inf; fall back
                    # to a uniform distribution over the samples.
                    logp = math.log(1.0/len(prob_dict.keys()))
                    for x in prob_dict.keys():
                        self._prob_dict[x] = logp
                else:
                    for (x, p) in self._prob_dict.items():
                        self._prob_dict[x] -= value_sum
            else:
                value_sum = sum(self._prob_dict.values())
                if value_sum == 0:
                    # Degenerate case: spread the mass uniformly.
                    p = 1.0/len(prob_dict.keys())
                    for x in prob_dict.keys():
                        self._prob_dict[x] = p
                else:
                    norm_factor = 1.0/value_sum
                    for (x, p) in self._prob_dict.items():
                        self._prob_dict[x] *= norm_factor
    def prob(self, sample):
        if self._log:
            if sample not in self._prob_dict: return 0
            else: return math.exp(self._prob_dict[sample])
        else:
            return self._prob_dict.get(sample, 0)
    def logprob(self, sample):
        # An unseen sample has probability 0, i.e. a log probability of
        # (approximately) negative infinity.  BUG FIX: this previously
        # returned ``1e-1000``, which underflows to +0.0 -- the log
        # probability of a *certain* event -- instead of ``-1e1000``
        # (cf. ProbDistI.logprob).
        if self._log:
            return self._prob_dict.get(sample, -1e1000)
        else:
            if sample not in self._prob_dict: return -1e1000
            else: return math.log(self._prob_dict[sample])
    def max(self):
        if not hasattr(self, '_max'):
            self._max = max([(p,v) for (v,p) in self._prob_dict.items()])[1]
        return self._max
    def samples(self):
        return self._prob_dict.keys()
    def __repr__(self):
        return '<ProbDist with %d samples>' % len(self._prob_dict)
class MLEProbDist(ProbDistI):
    """
    The maximum likelihood estimate for the probability distribution
    of the experiment used to generate a frequency distribution.  The
    X{maximum likelihood estimate} approximates the probability of
    each sample as the frequency of that sample in the frequency
    distribution.
    """
    def __init__(self, freqdist):
        """
        Create a maximum likelihood probability estimate from the
        given frequency distribution.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        """
        assert _chktype(1, freqdist, FreqDist)
        if freqdist.N() == 0:
            raise ValueError('An MLE probability distribution must '
                             'have at least one sample.')
        self._freqdist = freqdist
    def freqdist(self):
        """
        @return: The frequency distribution underlying this
            probability distribution.
        @rtype: C{FreqDist}
        """
        return self._freqdist
    def prob(self, sample):
        # The MLE probability of a sample is simply its relative
        # frequency in the underlying distribution.
        return self._freqdist.freq(sample)
    def max(self):
        # Probability is monotonic in frequency, so the most probable
        # sample is the most frequent one.
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<MLEProbDist based on %d samples>' % self._freqdist.N()
class LidstoneProbDist(ProbDistI):
    """
    The Lidstone estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  The
    C{Lidstone estimate} is paramaterized by a real number M{gamma},
    which typically ranges from 0 to 1.  The X{Lidstone estimate}
    approximates the probability of a sample with count M{c} from an
    experiment with M{N} outcomes and M{B} bins as
    M{(c+gamma)/(N+B*gamma)}.  This is equivalant to adding
    M{gamma} to the count for each bin, and taking the maximum
    likelihood estimate of the resulting frequency distribution.
    """
    def __init__(self, freqdist, gamma, bins=None):
        """
        Use the Lidstone estimate to create a probability distribution
        for the experiment used to generate C{freqdist}.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        @type gamma: C{float}
        @param gamma: A real number used to paramaterize the
            estimate.  The Lidstone estimate is equivalant to adding
            M{gamma} to the count for each bin, and taking the
            maximum likelihood estimate of the resulting frequency
            distribution.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            C{bins} is not specified, it defaults to C{freqdist.B()}.
        """
        assert _chktype(1, freqdist, FreqDist)
        assert _chktype(2, gamma, types.FloatType, types.IntType)
        assert _chktype(3, bins, types.IntType, types.NoneType)
        if (bins == 0) or (bins is None and freqdist.N() == 0):
            # Strip the trailing 'ProbDist' from the class name so the
            # message reads e.g. 'Lidstone', 'Laplace' or 'ELE'
            # (subclasses reuse this constructor).
            name = self.__class__.__name__[:-8]
            raise ValueError('A %s probability distribution ' % name +
                             'must have at least one bin.')
        if (bins is not None) and (bins < freqdist.B()):
            name = self.__class__.__name__[:-8]
            raise ValueError('\nThe number of bins in a %s must be ' % name +
                             'greater than or equal to\nthe number of '+
                             'bins in the FreqDist used to create it.')
        self._freqdist = freqdist
        self._gamma = float(gamma)
        # Cache N since it is used on every prob() call.
        self._N = self._freqdist.N()
        if bins is None: bins = freqdist.B()
        self._bins = bins
    def freqdist(self):
        """
        @return: The frequency distribution that this probability
            distribution is based on.
        @rtype: C{FreqDist}
        """
        return self._freqdist
    def prob(self, sample):
        # Smoothed estimate: (c + gamma) / (N + B*gamma).
        c = self._freqdist.count(sample)
        return (c + self._gamma) / (self._N + self._bins * self._gamma)
    def max(self):
        # For Lidstone distributions, probability is monotonic with
        # frequency, so the most probable sample is the one that
        # occurs most frequently.
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
class LaplaceProbDist(LidstoneProbDist):
    """
    The Laplace estimate for the probability distribution of the
    experiment used to generate a frequency distribution.  It
    approximates the probability of a sample with count M{c} from an
    experiment with M{N} outcomes and M{B} bins as M{(c+1)/(N+B)}:
    add one to every bin's count, then take the maximum likelihood
    estimate of the resulting frequency distribution.
    """
    def __init__(self, freqdist, bins=None):
        """
        Create a Laplace ("add one") probability estimate for the
        experiment that generated C{freqdist}.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment described by this distribution.  Must be
            set correctly for probabilities to sum to one; defaults to
            C{freqdist.B()}.
        """
        assert _chktype(1, freqdist, FreqDist)
        assert _chktype(2, bins, types.IntType, types.NoneType)
        # Laplace smoothing is Lidstone smoothing with gamma = 1.
        LidstoneProbDist.__init__(self, freqdist, 1, bins)
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
    """
    The expected likelihood estimate for the probability distribution
    of the experiment used to generate a frequency distribution.  It
    approximates the probability of a sample with count M{c} from an
    experiment with M{N} outcomes and M{B} bins as
    M{(c+0.5)/(N+B/2)}: add 0.5 to every bin's count, then take the
    maximum likelihood estimate of the resulting frequency
    distribution.
    """
    def __init__(self, freqdist, bins=None):
        """
        Create an expected likelihood probability estimate for the
        experiment that generated C{freqdist}.
        @type freqdist: C{FreqDist}
        @param freqdist: The frequency distribution that the
            probability estimates should be based on.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment described by this distribution.  Must be
            set correctly for probabilities to sum to one; defaults to
            C{freqdist.B()}.
        """
        # The expected likelihood estimate is Lidstone smoothing with
        # gamma = 0.5.
        LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
    """
    The heldout estimate for the probability distribution of the
    experiment used to generate two frequency distributions.  These
    two frequency distributions are called the "heldout frequency
    distribution" and the "base frequency distribution."  The
    X{heldout estimate} uses uses the X{heldout frequency
    distribution} to predict the probability of each sample, given its
    frequency in the X{base frequency distribution}.
    In particular, the heldout estimate approximates the probability
    for a sample that occurs M{r} times in the base distribution as
    the average frequency in the heldout distribution of all samples
    that occur M{r} times in the base distribution.
    This average frequency is M{Tr[r]/(Nr[r]*N)}, where:
        - M{Tr[r]} is the total count in the heldout distribution for
          all samples that occur M{r} times in the base
          distribution.
        - M{Nr[r]} is the number of samples that occur M{r} times in
          the base distribution.
        - M{N} is the number of outcomes recorded by the heldout
          frequency distribution.
    In order to increase the efficiency of the C{prob} member
    function, M{Tr[r]/(Nr[r]*N)} is precomputed for each value of M{r}
    when the C{HeldoutProbDist} is created.
    @type _estimate: C{list} of C{float}
    @ivar _estimate: A list mapping from M{r}, the number of
        times that a sample occurs in the base distribution, to the
        probability estimate for that sample.  C{_estimate[M{r}]} is
        calculated by finding the average frequency in the heldout
        distribution of all samples that occur M{r} times in the base
        distribution.  In particular, C{_estimate[M{r}]} =
        M{Tr[r]/(Nr[r]*N)}.
    @type _max_r: C{int}
    @ivar _max_r: The maximum number of times that any sample occurs
        in the base distribution.  C{_max_r} is used to decide how
        large C{_estimate} must be.
    """
    def __init__(self, base_fdist, heldout_fdist, bins=None):
        """
        Use the heldout estimate to create a probability distribution
        for the experiment used to generate C{base_fdist} and
        C{heldout_fdist}.
        @type base_fdist: C{FreqDist}
        @param base_fdist: The base frequency distribution.
        @type heldout_fdist: C{FreqDist}
        @param heldout_fdist: The heldout frequency distribution.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            C{bins} is not specified, it defaults to C{freqdist.B()}.
        """
        assert _chktype(1, base_fdist, FreqDist)
        assert _chktype(2, heldout_fdist, FreqDist)
        assert _chktype(3, bins, types.IntType, types.NoneType)
        self._base_fdist = base_fdist
        self._heldout_fdist = heldout_fdist
        # The max number of times any sample occurs in base_fdist.
        self._max_r = base_fdist.count(base_fdist.max())
        # Calculate Tr, Nr, and N.
        Tr = self._calculate_Tr()
        Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
        N = heldout_fdist.N()
        # Use Tr, Nr, and N to compute the probability estimate for
        # each value of r.
        self._estimate = self._calculate_estimate(Tr, Nr, N)
    def _calculate_Tr(self):
        """
        @return: the list M{Tr}, where M{Tr[r]} is the total count in
            C{heldout_fdist} for all samples that occur M{r}
            times in C{base_fdist}.
        @rtype: C{list} of C{float}
        """
        Tr = [0.0] * (self._max_r+1)
        for sample in self._heldout_fdist.samples():
            # Bucket each heldout count by the sample's base-dist count.
            r = self._base_fdist.count(sample)
            Tr[r] += self._heldout_fdist.count(sample)
        return Tr
    def _calculate_estimate(self, Tr, Nr, N):
        """
        @return: the list M{estimate}, where M{estimate[r]} is the
            probability estimate for any sample that occurs M{r} times
            in the base frequency distribution.  In particular,
            M{estimate[r]} is M{Tr[r]/(N[r]*N)}.  In the special case
            that M{N[r]=0}, M{estimate[r]} will never be used; so we
            define M{estimate[r]=None} for those cases.
        @rtype: C{list} of C{float}
        @type Tr: C{list} of C{float}
        @param Tr: the list M{Tr}, where M{Tr[r]} is the total count in
            the heldout distribution for all samples that occur M{r}
            times in base distribution.
        @type Nr: C{list} of C{float}
        @param Nr: The list M{Nr}, where M{Nr[r]} is the number of
            samples that occur M{r} times in the base distribution.
        @type N: C{int}
        @param N: The total number of outcomes recorded by the heldout
            frequency distribution.
        """
        estimate = []
        for r in range(self._max_r+1):
            if Nr[r] == 0: estimate.append(None)
            else: estimate.append(Tr[r]/(Nr[r]*N))
        return estimate
    def base_fdist(self):
        """
        @return: The base frequency distribution that this probability
            distribution is based on.
        @rtype: C{FreqDist}
        """
        return self._base_fdist
    def heldout_fdist(self):
        """
        @return: The heldout frequency distribution that this
            probability distribution is based on.
        @rtype: C{FreqDist}
        """
        return self._heldout_fdist
    def prob(self, sample):
        # Use our precomputed probability estimate.
        # NOTE(review): if Nr[r] was 0, _estimate[r] is None and this
        # returns None rather than a float -- confirm callers never hit
        # that case.
        r = self._base_fdist.count(sample)
        return self._estimate[r]
    def max(self):
        # Note: the Heldout estimation is *not* necessarily monotonic;
        # so this implementation is currently broken.  However, it
        # should give the right answer *most* of the time. :)
        return self._base_fdist.max()
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
        return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
    """
    The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distribution.
    The X{cross-validation estimate} for the probability of a sample
    is found by averaging the held-out estimates for the sample in
    each pair of frequency distributions.
    """
    def __init__(self, freqdists, bins):
        """
        Use the cross-validation estimate to create a probability
        distribution for the experiment used to generate
        C{freqdists}.
        @type freqdists: C{list} of C{FreqDist}
        @param freqdists: A list of the frequency distributions
            generated by the experiment.
        @type bins: C{int}
        @param bins: The number of sample values that can be generated
            by the experiment that is described by the probability
            distribution.  This value must be correctly set for the
            probabilities of the sample values to sum to one.  If
            C{bins} is not specified, it defaults to C{freqdist.B()}.
        """
        assert _chktype(1, freqdists, [FreqDist], (FreqDist,))
        assert _chktype(2, bins, types.IntType, types.NoneType)
        self._freqdists = freqdists
        # Create a heldout probability distribution for each *ordered*
        # pair of distinct frequency distributions in freqdists.
        self._heldout_probdists = []
        for fdist1 in freqdists:
            for fdist2 in freqdists:
                if fdist1 is not fdist2:
                    probdist = HeldoutProbDist(fdist1, fdist2, bins)
                    self._heldout_probdists.append(probdist)
    def freqdists(self):
        """
        @rtype: C{list} of C{FreqDist}
        @return: The list of frequency distributions that this
            C{ProbDist} is based on.
        """
        return self._freqdists
    def prob(self, sample):
        # Find the average probability estimate returned by each
        # heldout distribution.
        prob = 0.0
        for heldout_probdist in self._heldout_probdists:
            prob += heldout_probdist.prob(sample)
        return prob/len(self._heldout_probdists)
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
    """
    The Witten-Bell estimate of a probability distribution.  This distribution
    allocates uniform probability mass to as yet unseen events by using the
    number of events that have only been seen once.  The probability mass
    reserved for unseen events is equal to:
        - M{T / (N + T)}
    where M{T} is the number of observed event types and M{N} is the total
    number of observed events.  This equates to the maximum likelihood estimate
    of a new type event occuring.  The remaining probability mass is discounted
    such that all probability estimates sum to one, yielding:
        - M{p = T / Z (N + T)}, if count = 0
        - M{p = c / (N + T)}, otherwise
    """
    def __init__(self, freqdist, bins=None):
        """
        Creates a distribution of Witten-Bell probability estimates.  This
        distribution allocates uniform probability mass to as yet unseen
        events by using the number of events that have only been seen once.
        The probability mass reserved for unseen events is equal to:
            - M{T / (N + T)}
        where M{T} is the number of observed event types and M{N} is the total
        number of observed events.  This equates to the maximum likelihood
        estimate of a new type event occuring.  The remaining probability mass
        is discounted such that all probability estimates sum to one,
        yielding:
            - M{p = T / Z (N + T)}, if count = 0
            - M{p = c / (N + T)}, otherwise
        The parameters M{T} and M{N} are taken from the C{freqdist} parameter
        (the C{B()} and C{N()} values).  The normalising factor M{Z} is
        calculated using these values along with the C{bins} parameter.
        @param freqdist: The frequency counts upon which to base the
            estimation.
        @type freqdist: C{FreqDist}
        @param bins: The number of possible event types.  This must be
            at least as large as the number of bins in the
            C{freqdist}.  If C{None}, then it's assumed to be
            equal to that of the C{freqdist}
        @type bins: C{Int}
        """
        assert _chktype(1, freqdist, FreqDist)
        assert _chktype(2, bins, types.IntType, types.NoneType)
        assert bins == None or bins >= freqdist.B(),\
               'Bins parameter must not be less than freqdist.B()'
        if bins == None:
            bins = freqdist.B()
        self._freqdist = freqdist
        # T: number of observed event types; Z: number of unseen bins;
        # N: total number of observed events.
        self._T = self._freqdist.B()
        self._Z = bins - self._freqdist.B()
        self._N = self._freqdist.N()
    def prob(self, sample):
        # inherit docs from ProbDistI
        # NOTE(review): when bins == freqdist.B(), self._Z is 0 and an
        # unseen sample triggers a ZeroDivisionError here -- confirm
        # that callers always pass bins > freqdist.B().
        c = self._freqdist.count(sample)
        if c == 0:
            return self._T / float(self._Z * (self._N + self._T))
        else:
            return c / float(self._N + self._T)
    def max(self):
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def freqdist(self):
        return self._freqdist
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
class GoodTuringProbDist(ProbDistI):
    """
    The Good-Turing estimate of a probability distribution.  This method
    calculates the probability mass to assign to events with zero or low
    counts based on the number of events with higher counts.  It does so by
    using the smoothed count M{c*}:
        - M{c* = (c + 1) N(c + 1) / N(c)}
    where M{c} is the original count, M{N(i)} is the number of event types
    observed with count M{i}.  These smoothed counts are then normalised to
    yield a probability distribution.
    """
    # TODO - add a cut-off parameter, above which the counts are unmodified
    # (see J&M p216)
    def __init__(self, freqdist, bins):
        """
        Creates a Good-Turing probability distribution estimate.  This method
        calculates the probability mass to assign to events with zero or low
        counts based on the number of events with higher counts.  It does so by
        using the smoothed count M{c*}:
            - M{c* = (c + 1) N(c + 1) / N(c)}
        where M{c} is the original count, M{N(i)} is the number of event types
        observed with count M{i}.  These smoothed counts are then normalised to
        yield a probability distribution.
        The C{bins} parameter allows C{N(0)} to be estimated.
        @param freqdist: The frequency counts upon which to base the
            estimation.
        @type freqdist: C{FreqDist}
        @param bins: The number of possible event types.  This must be
            at least as large as the number of bins in the
            C{freqdist}.  If C{None}, then it's taken to be
            equal to C{freqdist.B()}.
        @type bins: C{Int}
        """
        assert _chktype(1, freqdist, FreqDist)
        assert _chktype(2, bins, types.IntType, types.NoneType)
        assert bins == None or bins >= freqdist.B(),\
               'Bins parameter must not be less than freqdist.B()'
        if bins == None:
            bins = freqdist.B()
        self._freqdist = freqdist
        self._bins = bins
    def prob(self, sample):
        # inherit docs from FreqDist
        # Smoothed estimate: p = (c+1) * N(c+1) / (N(c) * N).
        # NOTE(review): if no sample has count c (i.e. Nr(c) == 0, which
        # includes the unseen case when bins == freqdist.B()), this
        # divides by zero -- confirm expected usage.
        c = self._freqdist.count(sample)
        nc = self._freqdist.Nr(c, self._bins)
        ncn = self._freqdist.Nr(c + 1, self._bins)
        return float(c + 1) * ncn / (nc * self._freqdist.N())
    def max(self):
        return self._freqdist.max()
    def samples(self):
        return self._freqdist.samples()
    def freqdist(self):
        return self._freqdist
    def __repr__(self):
        """
        @rtype: C{string}
        @return: A string representation of this C{ProbDist}.
        """
        return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
class MutableProbDist(ProbDistI):
    """
    A mutable probdist where the probabilities may be easily modified.  This
    simply copies an existing probdist, storing the probability values in a
    mutable dictionary and providing an update method.
    """
    def __init__(self, prob_dist, samples, store_logs=True):
        """
        Creates the mutable probdist based on the given prob_dist and using
        the list of samples given.  These values are stored as log
        probabilities if the store_logs flag is set.
        @param prob_dist: the distribution from which to garner the
            probabilities
        @type prob_dist: ProbDist
        @param samples: the complete set of samples
        @type samples: sequence of any
        @param store_logs: whether to store the probabilities as logarithms
        @type store_logs: bool
        """
        self._samples = samples
        # Map each sample to its index in the probability array.
        self._sample_dict = dict([(samples[i], i) for i in range(len(samples))])
        self._data = numpy.zeros(len(samples), numpy.float64)
        for i in range(len(samples)):
            if store_logs:
                self._data[i] = prob_dist.logprob(samples[i])
            else:
                self._data[i] = prob_dist.prob(samples[i])
        self._logs = store_logs
    def samples(self):
        # inherit doco
        return self._samples
    def prob(self, sample):
        # inherit doco
        i = self._sample_dict.get(sample)
        if i is not None:
            if self._logs:
                return math.exp(self._data[i])
            else:
                return self._data[i]
        else:
            # Unknown samples have probability zero.
            return 0.0
    def logprob(self, sample):
        # inherit doco
        i = self._sample_dict.get(sample)
        if i is not None:
            if self._logs:
                return self._data[i]
            else:
                return math.log(self._data[i])
        else:
            return NINF
    def update(self, sample, prob, log=True):
        """
        Update the probability for the given sample.  This may cause the
        object to stop being a valid probability distribution - the user
        must ensure that they update the sample probabilities such that all
        samples have probabilities between 0 and 1 and that all
        probabilities sum to one.
        @param sample: the sample for which to update the probability
        @type sample: any
        @param prob: the new probability
        @type prob: float
        @param log: is the probability already logged
        @type log: bool
        """
        i = self._sample_dict.get(sample)
        assert i is not None
        # BUG FIX: the ``log`` parameter used to shadow the module-level
        # log() function, so the conversion branches below called a bool
        # (TypeError: 'bool' object is not callable).  Call math.log /
        # math.exp explicitly instead.
        if self._logs:
            if log: self._data[i] = prob
            else: self._data[i] = math.log(prob)
        else:
            if log: self._data[i] = math.exp(prob)
            else: self._data[i] = prob
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
    # Expected log-probability of actual_pdist's samples under
    # test_pdist, weighted by their actual probabilities.
    # (Original author's note: "Is this right?")
    total = 0
    for outcome in actual_pdist.samples():
        total += actual_pdist.prob(outcome) * math.log(test_pdist.prob(outcome))
    return total
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist:
    """
    A collection of frequency distributions for a single experiment
    run under different conditions.  Conditional frequency
    distributions are used to record the number of times each sample
    occured, given the condition under which the experiment was run.
    For example, a conditional frequency distribution could be used to
    record the frequency of each word type in a document, given the
    length of the word type.  Formally, a conditional frequency
    distribution can be defined as a function that maps from each
    condition to the C{FreqDist} for the experiment under that
    condition.
    The frequency distribution for each condition is accessed using
    the indexing operator:
        >>> cfdist[3]
        <FreqDist with 73 outcomes>
        >>> cfdist[3].freq('the')
        0.4
        >>> cfdist[3].count('dog')
        2
    When the indexing operator is used to access the frequency
    distribution for a condition that has not been accessed before,
    C{ConditionalFreqDist} creates a new empty C{FreqDist} for that
    condition.
    Conditional frequency distributions are typically constructed by
    repeatedly running an experiment under a variety of conditions,
    and incrementing the sample outcome counts for the appropriate
    conditions.  For example, the following code will produce a
    conditional frequency distribution that encodes how often each
    word type occurs, given the length of that word type:
        >>> cfdist = ConditionalFreqDist()
        >>> for token in text_token['SUBTOKENS']:
        ...     condition = len(token['TEXT'])
        ...     cfdist[condition].inc(token['TEXT'])
    """
    def __init__(self):
        """
        Construct a new empty conditional frequency distribution.  In
        particular, the count for every sample, under every condition,
        is zero.
        """
        # Map each condition to its FreqDist, created lazily by
        # __getitem__.
        self._fdists = {}
    def __getitem__(self, condition):
        """
        Return the frequency distribution that encodes the frequency
        of each sample outcome, given that the experiment was run
        under the given condition.  If the frequency distribution for
        the given condition has not been accessed before, then this
        will create a new empty C{FreqDist} for that condition.
        @return: The frequency distribution that encodes the frequency
            of each sample outcome, given that the experiment was run
            under the given condition.
        @rtype: C{FreqDist}
        @param condition: The condition under which the experiment was
            run.
        @type condition: any
        """
        # Create the conditioned freq dist, if it doesn't exist.
        # (``in`` replaces dict.has_key(), which is deprecated and
        # removed in Python 3; behavior is identical.)
        if condition not in self._fdists:
            self._fdists[condition] = FreqDist()
        return self._fdists[condition]
    def conditions(self):
        """
        @return: A list of the conditions that have been accessed for
            this C{ConditionalFreqDist}.  Use the indexing operator to
            access the frequency distribution for a given condition.
            Note that the frequency distributions for some conditions
            may contain zero sample outcomes.
        @rtype: C{list}
        """
        # Return a real list (dict.keys() is a lazy view in Python 3).
        return list(self._fdists.keys())
    def __repr__(self):
        """
        @return: A string representation of this
            C{ConditionalFreqDist}.
        @rtype: C{string}
        """
        n = len(self._fdists)
        return '<ConditionalFreqDist with %d conditions>' % n
class ConditionalProbDistI:
    """
    A collection of probability distributions for a single experiment
    run under different conditions.  Conditional probability
    distributions are used to estimate the likelihood of each sample,
    given the condition under which the experiment was run.  For
    example, a conditional probability distribution could be used to
    estimate the probability of each word type in a document, given
    the length of the word type.  Formally, a conditional probability
    distribution can be defined as a function that maps from each
    condition to the C{ProbDist} for the experiment under that
    condition.
    """
    def __init__(self):
        # Use the callable raise form (not ``raise E, msg``) so the
        # module stays parseable by Python 3 tools; behavior unchanged.
        raise AssertionError('ConditionalProbDistI is an interface')
    def __getitem__(self, condition):
        """
        @return: The probability distribution for the experiment run
            under the given condition.
        @rtype: C{ProbDistI}
        @param condition: The condition whose probability distribution
            should be returned.
        @type condition: any
        """
        raise AssertionError
    def conditions(self):
        """
        @return: A list of the conditions that are represented by
            this C{ConditionalProbDist}.  Use the indexing operator to
            access the probability distribution for a given condition.
        @rtype: C{list}
        """
        raise AssertionError
# For now, this is the only implementation of ConditionalProbDistI;
# but we would want a different implementation if we wanted to build a
# conditional probability distribution analytically (e.g., a gaussian
# distribution), rather than basing it on an underlying frequency
# distribution.
class ConditionalProbDist(ConditionalProbDistI):
    """
    A conditional probability distribution modelling the experiments
    that were used to generate a conditional frequency distribution.
    A C{ConditionalProbDist} is constructed from a
    C{ConditionalFreqDist} and a X{C{ProbDist} factory}:

      - The B{C{ConditionalFreqDist}} specifies the frequency
        distribution for each condition.
      - The B{C{ProbDist} factory} is a function that takes a
        condition's frequency distribution, and returns its
        probability distribution.  A C{ProbDist} class's name (such as
        C{MLEProbDist} or C{HeldoutProbDist}) can be used to specify
        that class's constructor.

    The first argument to the C{ProbDist} factory is the frequency
    distribution that it should model; and the remaining arguments are
    specified by the C{factory_args} parameter to the
    C{ConditionalProbDist} constructor.  For example, the following
    code constructs a C{ConditionalProbDist}, where the probability
    distribution for each condition is an C{ELEProbDist} with 10 bins:

        >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
        >>> print cpdist['run'].max()
        'NN'
        >>> print cpdist['run'].prob('NN')
        0.0813
    """
    def __init__(self, cfdist, probdist_factory,
                 supply_condition=False, *factory_args):
        """
        Construct a new conditional probability distribution, based on
        the given conditional frequency distribution and C{ProbDist}
        factory.

        @type cfdist: L{ConditionalFreqDist}
        @param cfdist: The C{ConditionalFreqDist} specifying the
            frequency distribution for each condition.
        @type probdist_factory: C{class} or C{function}
        @param probdist_factory: The function or class that maps
            a condition's frequency distribution to its probability
            distribution.  The function is called with the frequency
            distribution as its first argument, the condition as its
            second argument (only if C{supply_condition=True}), and
            C{factory_args} as its remaining arguments.
        @type supply_condition: C{bool}
        @param supply_condition: If true, then pass the condition as
            the second argument to C{probdist_factory}.
        @type factory_args: (any)
        @param factory_args: Extra arguments for C{probdist_factory}.
            These arguments are usually used to specify extra
            properties for the probability distributions of individual
            conditions, such as the number of bins they contain.
        """
        assert _chktype(1, cfdist, ConditionalFreqDist)
        assert _chktype(2, probdist_factory, types.FunctionType,
                        types.BuiltinFunctionType, types.MethodType,
                        types.ClassType)
        assert _chktype(3, supply_condition, bool)
        self._probdist_factory = probdist_factory
        self._cfdist = cfdist
        self._supply_condition = supply_condition
        self._factory_args = factory_args

        # Eagerly build a probability distribution for every condition
        # that the frequency distribution has already seen.
        self._pdists = {}
        for c in cfdist.conditions():
            if supply_condition:
                pdist = probdist_factory(cfdist[c], c, *factory_args)
            else:
                pdist = probdist_factory(cfdist[c], *factory_args)
            self._pdists[c] = pdist

    def __getitem__(self, condition):
        """
        @return: The probability distribution for the experiment run
            under the given condition.  Unseen conditions get a
            distribution lazily built from an empty C{FreqDist}
            (typically a uniform distribution).
        @rtype: C{ProbDistI}
        """
        if condition not in self._pdists:
            # If it's a condition we haven't seen, create a new prob
            # dist from the empty freq dist.  Typically, this will
            # give a uniform prob dist.
            # BUG FIX: honour supply_condition here too -- previously the
            # condition was never passed, breaking factories that expect
            # it as their second argument.
            if self._supply_condition:
                pdist = self._probdist_factory(FreqDist(), condition,
                                               *self._factory_args)
            else:
                pdist = self._probdist_factory(FreqDist(),
                                               *self._factory_args)
            self._pdists[condition] = pdist
        return self._pdists[condition]

    def conditions(self):
        """
        @return: A list of the conditions represented by this
            C{ConditionalProbDist}.
        @rtype: C{list}
        """
        return self._pdists.keys()

    def __repr__(self):
        """
        @return: A string representation of this
            C{ConditionalProbDist}.
        @rtype: C{string}
        """
        n = len(self._pdists)
        return '<ConditionalProbDist with %d conditions>' % n
class DictionaryConditionalProbDist(ConditionalProbDistI):
    """
    A thin C{ConditionalProbDistI} implementation backed directly by a
    caller-supplied dictionary of C{ProbDist}s, instead of deriving
    them from a C{ConditionalFreqDist}.
    """
    def __init__(self, probdist_dict):
        """
        @param probdist_dict: a dictionary containing the probdists indexed
            by the conditions
        @type probdist_dict: dict any -> probdist
        """
        # Keep a reference to the caller's mapping; no copy is made.
        self._dict = probdist_dict
    def __getitem__(self, condition):
        # inherit doco
        # Unknown conditions propagate the dictionary's KeyError.
        return self._dict[condition]
    def conditions(self):
        # inherit doco
        return self._dict.keys()
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn:
    """
    A mix-in class to associate probabilities with other classes
    (trees, rules, etc.).  To use the C{ProbabilisticMixIn} class,
    define a new class that derives from an existing class and from
    ProbabilisticMixIn.  You will need to define a new constructor for
    the new class, which explicitly calls the constructors of both its
    parent classes.  For example:

        >>> class A:
        ...     def __init__(self, x, y): self.data = (x,y)
        ...
        >>> class ProbabilisticA(A, ProbabilisticMixIn):
        ...     def __init__(self, x, y, **prob_kwarg):
        ...         A.__init__(self, x, y)
        ...         ProbabilisticMixIn.__init__(self, **prob_kwarg)

    See the documentation for the ProbabilisticMixIn
    L{constructor<__init__>} for information about the arguments it
    expects.

    You should generally also redefine the string representation
    methods, the comparison methods, and the hashing method.
    """
    def __init__(self, **kwargs):
        """
        Initialize this object's probability.  This initializer should
        be called by subclass constructors.  C{prob} should generally be
        the first argument for those constructors.

        @kwparam prob: The probability associated with the object.
        @type prob: C{float}
        @kwparam logprob: The log of the probability associated with
            the object.
        @type logprob: C{float}
        """
        if 'prob' in kwargs:
            if 'logprob' in kwargs:
                raise TypeError('Must specify either prob or logprob '
                                '(not both)')
            else:
                ProbabilisticMixIn.set_prob(self, kwargs['prob'])
        elif 'logprob' in kwargs:
            ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
        else:
            # Neither form given: both caches start out unset.
            self.__prob = self.__logprob = None

    def set_prob(self, prob):
        """
        Set the probability associated with this object to C{prob}.

        @param prob: The new probability
        @type prob: C{float}
        """
        assert _chktype(1, prob, types.IntType, types.FloatType)
        self.__prob = prob
        # Invalidate the cached log probability; recomputed lazily.
        self.__logprob = None

    def set_logprob(self, logprob):
        """
        Set the log probability associated with this object to
        C{logprob}.  I.e., set the probability associated with this
        object to C{exp(logprob)}.

        @param logprob: The new log probability
        @type logprob: C{float}
        """
        # BUG FIX: this method previously referenced the undefined name
        # C{prob}, raising NameError on every call.
        assert _chktype(1, logprob, types.IntType, types.FloatType)
        self.__logprob = logprob
        # Invalidate the cached probability; recomputed lazily.
        self.__prob = None

    def prob(self):
        """
        @return: The probability associated with this object, or
            C{None} if no probability has been set.
        @rtype: C{float}
        """
        if self.__prob is None:
            if self.__logprob is None: return None
            # Derive (and cache) the probability from the log form.
            self.__prob = math.exp(self.__logprob)
        return self.__prob

    def logprob(self):
        """
        @return: C{log(p)}, where C{p} is the probability associated
            with this object, or C{None} if no probability has been set.
        @rtype: C{float}
        """
        if self.__logprob is None:
            if self.__prob is None: return None
            # Derive (and cache) the log probability from the prob form.
            self.__logprob = math.log(self.__prob)
        return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
    """
    A variant of C{ProbabilisticMixIn} whose probability is fixed at
    construction time: both mutators are disabled.
    """
    def set_prob(self, prob):
        raise ValueError, '%s is immutable' % self.__class__.__name__
    def set_logprob(self, prob):
        # NOTE: the parameter is named 'prob' even though it represents a
        # log probability; kept as-is for interface stability.
        raise ValueError, '%s is immutable' % self.__class__.__name__
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
    """
    Create a new frequency distribution, with random samples.  The
    samples are numbers from 1 to C{numsamples}, and are generated by
    summing two numbers, each of which has a uniform distribution.
    """
    import random
    from math import sqrt  # NOTE: unused import, kept as-is
    fdist = FreqDist()
    for x in range(numoutcomes):
        # NOTE: relies on Python 2 integer division for the randint
        # bounds; under true division these expressions become floats.
        y = (random.randint(1, (1+numsamples)/2) +
             random.randint(0, numsamples/2))
        fdist.inc(y)
    return fdist
def _create_sum_pdist(numsamples):
    """
    Return the true probability distribution for the experiment
    C{_create_rand_fdist(numsamples, x)}.
    """
    fdist = FreqDist()
    # Enumerate every (x, y) pair the random generator above can draw,
    # so each sum is counted once per way it can occur (an MLE over the
    # exact outcome space).  Relies on Python 2 integer division.
    for x in range(1, (1+numsamples)/2+1):
        for y in range(0, numsamples/2+1):
            fdist.inc(x+y)
    return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
    """
    A demonstration of frequency distributions and probability
    distributions.  This demonstration creates three frequency
    distributions with, and uses them to sample a random process with
    C{numsamples} samples.  Each frequency distribution is sampled
    C{numoutcomes} times.  These three frequency distributions are
    then used to build six probability distributions.  Finally, the
    probability estimates of these distributions are compared to the
    actual probability of each sample.

    @type numsamples: C{int}
    @param numsamples: The number of samples to use in each demo
        frequency distributions.
    @type numoutcomes: C{int}
    @param numoutcomes: The total number of outcomes for each
        demo frequency distribution.  These outcomes are divided into
        C{numsamples} bins.
    @rtype: C{None}
    """
    assert _chktype(1, numsamples, types.IntType)
    assert _chktype(2, numoutcomes, types.IntType)
    # Randomly sample a stochastic process three times.
    fdist1 = _create_rand_fdist(numsamples, numoutcomes)
    fdist2 = _create_rand_fdist(numsamples, numoutcomes)
    fdist3 = _create_rand_fdist(numsamples, numoutcomes)
    # Use our samples to create probability distributions.
    pdists = [
        MLEProbDist(fdist1),
        LidstoneProbDist(fdist1, 0.5, numsamples),
        HeldoutProbDist(fdist1, fdist2, numsamples),
        HeldoutProbDist(fdist2, fdist1, numsamples),
        CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
        _create_sum_pdist(numsamples),
    ]
    # Find the probability of each sample; each row pairs the empirical
    # frequency with every estimator's probability for that sample.
    vals = []
    for n in range(1,numsamples+1):
        vals.append(tuple([n, fdist1.freq(n)] +
                          [pdist.prob(n) for pdist in pdists]))
    # Print the results in a formatted table.
    # (Python 2 print statements and backtick-repr below, kept as-is.)
    print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
           (numsamples, numsamples, numoutcomes))
    print '='*9*(len(pdists)+2)
    FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
    print FORMATSTR % tuple([`pdist`[1:9] for pdist in pdists[:-1]])
    print '-'*9*(len(pdists)+2)
    FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
    for val in vals:
        print FORMATSTR % val
    # Print the totals for each column (should all be 1.0)
    zvals = zip(*vals)
    def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
    sums = [sum(val) for val in zvals[1:]]
    print '-'*9*(len(pdists)+2)
    FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
    print FORMATSTR % tuple(sums)
    print '='*9*(len(pdists)+2)
    # Display the distributions themselves, if they're short enough.
    if len(`str(fdist1)`) < 70:
        print ' fdist1:', str(fdist1)
        print ' fdist2:', str(fdist2)
        print ' fdist3:', str(fdist3)
        print
if __name__ == '__main__':
    # Run the demo twice: a small sampling run and a larger one.
    demo(6, 10)
    demo(5, 5000)
|
ronaldahmed/robot-navigation
|
neural-navigation-with-lstm/MARCO/nltk/probability.py
|
Python
|
mit
| 60,530
|
[
"Gaussian"
] |
41bd63550b6481e81e967d0d51a03586832bc7e6673163c01471e4333246d670
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
from __future__ import print_function
import unittest as ut
import espressomd
import numpy as np
from numpy.random import random
from espressomd.interactions import FeneBond
import espressomd.observables
def calc_com_x(system, x):
    """Return the centre-of-mass average of particle property ``x``.

    When the MASS feature is compiled in, particle masses are used as
    weights; otherwise a plain unweighted average over axis 0 is taken.
    """
    values = getattr(system.part[:], x)
    if espressomd.has_features(["MASS"]):
        return np.average(values, weights=system.part[:].mass, axis=0)
    return np.average(values, axis=0)
class Observables(ut.TestCase):
    """Check that particle-based observables agree with the same
    quantities computed directly from the particle data."""

    N_PART = 1000

    # Handle for espresso system
    system = espressomd.System(box_l=[10.0, 10.0, 10.0])
    system.seed = system.cell_system.get_state()['n_nodes'] * [1234]

    def setUp(self):
        # Populate the system once; subsequent setUp calls reuse the
        # particles created by the first one.
        if not self.system.part:
            for i in range(self.N_PART):
                self.system.part.add(pos=random(3) * 10, v=random(3), id=i)
                if espressomd.has_features(["MASS"]):
                    self.system.part[i].mass = random()
                if espressomd.has_features(["DIPOLES"]):
                    self.system.part[i].dip = random(3)
                if espressomd.has_features(["ROTATION"]):
                    self.system.part[i].omega_lab = random(3)
                if espressomd.has_features("ELECTROSTATICS"):
                    self.system.part[i].q = (1 if i % 2 == 0 else -1)

    def generate_test_for_pid_observable(
            _obs_name, _pprop_name, _agg_type=None):
        """Generates test cases for observables working on particle id lists.

        The generated test compares the observable's output against the
        corresponding particle property, optionally aggregated by
        average, sum or centre of mass.
        """
        pprop_name = _pprop_name
        obs_name = _obs_name
        agg_type = _agg_type

        def func(self):
            # This code is run at the execution of the generated function.
            # It will use the state of the variables in the outer function,
            # which was there, when the outer function was called

            # Get data from particles
            id_list = range(self.N_PART)
            part_data = getattr(self.system.part[id_list], pprop_name)
            # Reshape and aggregate to linear array
            if len(part_data.shape) > 1:
                if (agg_type == "average"):
                    # BUG FIX: was a bare `average(...)` (NameError);
                    # use the numpy function explicitly.
                    part_data = np.average(part_data, 0)
                if (agg_type == "sum"):
                    # BUG FIX: use np.sum so the reduction over axis 0
                    # is explicit instead of relying on builtin sum().
                    part_data = np.sum(part_data, 0)
                if (agg_type == 'com'):
                    part_data = calc_com_x(self.system, pprop_name)
                part_data = part_data.flatten()

            # Data from observable
            observable = obs_name(ids=id_list)
            obs_data = observable.calculate()
            self.assertEqual(observable.n_values(), len(part_data))
            np.testing.assert_array_almost_equal(
                obs_data,
                part_data, err_msg="Data did not agree for observable " +
                str(obs_name) +
                " and particle property " +
                pprop_name, decimal=9)

        return func

    test_pos = generate_test_for_pid_observable(
        espressomd.observables.ParticlePositions, "pos")
    test_v = generate_test_for_pid_observable(
        espressomd.observables.ParticleVelocities, "v")
    test_f = generate_test_for_pid_observable(
        espressomd.observables.ParticleForces, "f")
    test_com_position = generate_test_for_pid_observable(
        espressomd.observables.ComPosition, 'pos', 'com')
    test_com_velocity = generate_test_for_pid_observable(
        espressomd.observables.ComVelocity, 'v', 'com')
    test_com_force = generate_test_for_pid_observable(
        espressomd.observables.ComForce, 'f', 'com')

    if espressomd.has_features(["DIPOLES"]):
        test_mag_dip = generate_test_for_pid_observable(
            espressomd.observables.MagneticDipoleMoment, "dip", "sum")

    if espressomd.has_features(["ROTATION"]):
        test_body_angular_velocity = generate_test_for_pid_observable(
            espressomd.observables.ParticleBodyAngularVelocities, "omega_body")
        test_lab_angular_velocity = generate_test_for_pid_observable(
            espressomd.observables.ParticleAngularVelocities, "omega_lab")

    def test_particle_body_velocities(self):
        """Velocities in the body frame must match per-particle conversion."""
        obs = espressomd.observables.ParticleBodyVelocities(
            ids=range(self.N_PART))
        obs_data = obs.calculate()
        part_data = np.array([p.convert_vector_space_to_body(p.v)
                              for p in self.system.part])
        np.testing.assert_array_almost_equal(part_data.flatten(), obs_data,
                                             err_msg="Data did not agree for observable ParticleBodyVelocities and particle derived values.",
                                             decimal=9)

    def test_stress_tensor(self):
        """StressTensor observable must match the analysis routine."""
        s = self.system.analysis.stress_tensor()["total"].reshape(9)
        obs_data = np.array(espressomd.observables.StressTensor().calculate())
        self.assertEqual(
            espressomd.observables.StressTensor().n_values(), len(s))
        np.testing.assert_array_almost_equal(
            s,
            obs_data,
            err_msg="Stress tensor from analysis and observable did not agree",
            decimal=9)

    @ut.skipIf(not espressomd.has_features('ELECTROSTATICS'), "Skipping test for Current observable due to missing features.")
    def test_current(self):
        """Current observable must equal sum(q_i * v_i)."""
        obs_data = espressomd.observables.Current(
            ids=range(self.N_PART)).calculate()
        part_data = self.system.part[:].q.dot(self.system.part[:].v)
        self.assertEqual(espressomd.observables.Current(
            ids=range(self.N_PART)).n_values(), len(part_data.flatten()))
        np.testing.assert_array_almost_equal(
            obs_data, part_data, err_msg="Data did not agree for observable 'Current'", decimal=9)

    @ut.skipIf(not espressomd.has_features('ELECTROSTATICS'), "Skipping test for DipoleMoment observable due to missing features.")
    def test_dipolemoment(self):
        """DipoleMoment observable must equal sum(q_i * pos_i)."""
        obs = espressomd.observables.DipoleMoment(ids=range(self.N_PART))
        obs_data = obs.calculate()
        part_data = self.system.part[:].q.dot(self.system.part[:].pos)
        self.assertEqual(obs.n_values(), len(part_data.flatten()))
        np.testing.assert_array_almost_equal(
            obs_data, part_data, err_msg="Data did not agree for observable 'DipoleMoment'", decimal=9)
if __name__ == "__main__":
    # Run the observable test suite via unittest's CLI entry point.
    ut.main()
|
hmenke/espresso
|
testsuite/python/observables.py
|
Python
|
gpl-3.0
| 7,120
|
[
"ESPResSo"
] |
1dd94b7fbe2b0f2f74c66b4a42cad9f90aff417a6c1767b20c4b00c04166bc01
|
import numpy as np
from scipy.io import netcdf
def genNetCDF(wavelength, stI, stQ, stU, stV, sigmaI, sigmaQ, sigmaU, sigmaV, boundary, height, obsTheta, obsGamma, mask, pars, outputFile):
    """
    This routine generates a NetCDF file with the observations ready for Hazel-MPI
    Args:
        wavelength (float): array of size [nlambda]
        stI (float): array of size [npixel, nlambda] with Stokes I
        stQ (float): array of size [npixel, nlambda] with Stokes Q
        stU (float): array of size [npixel, nlambda] with Stokes U
        stV (float): array of size [npixel, nlambda] with Stokes V
        sigmaI (float): array of size [npixel, nlambda] with the noise in Stokes I
        sigmaQ (float): array of size [npixel, nlambda] with the noise in Stokes Q
        sigmaU (float): array of size [npixel, nlambda] with the noise in Stokes U
        sigmaV (float): array of size [npixel, nlambda] with the noise in Stokes V
        boundary (float): array of size [npixel, 4] with the boundary conditions [I0,Q0,U0,V0] for every pixel
        height (float): array of size [npixel] indicating the height of the pixel over the surface in arcsec
        obsTheta (float): array of size [npixel] indicating the angle of the observation in degrees
        obsGamma (float): array of size [npixel] the angle of the reference for Stokes Q
        mask (float): array of the original dimensions of the observations that is used later to reconstruct the inverted maps [nx,ny]
        pars (float): array of size [npixel,nparameters] that gives the initial value of the parameters
            The size depends on the radiative transfer option:
            * 1-component (vector of size 8): B, thetaB, chiB, tau, vdop, a, vmac, beta
            * 2-component 1+1 with same field (vector of size 10): B, thetaB, chiB, tau1, tau2, vdop, a, vmac1, vmac2, beta
            * 2-component 1+1 with different field (vector of size 14): B1, thetaB1, chiB1, B2, thetaB2, chiB2, tau1, tau2, vdop1, vdop2, a, vmac1, vmac2, beta
            * 2-component 2 with different field with ff (vector of size 14): B1, thetaB1, chiB1, B2, thetaB2, chiB2, tau1, tau2, vdop1, vdop2, a, vmac1, vmac2, ff
        outputFile (float): output file
    """
    nPixel, nLambda = stI.shape
    # Eight data columns per pixel: the four Stokes profiles plus their
    # four noise profiles.
    nCols = 8

    # Stack the eight maps and reorder to (npixel, nlambda, ncolumns),
    # matching the 'map' variable layout declared below.
    obsMap = np.zeros((8,nLambda,nPixel))
    obsMap[0,:,:] = stI.T
    obsMap[1,:,:] = stQ.T
    obsMap[2,:,:] = stU.T
    obsMap[3,:,:] = stV.T
    obsMap[4,:,:] = sigmaI.T
    obsMap[5,:,:] = sigmaQ.T
    obsMap[6,:,:] = sigmaU.T
    obsMap[7,:,:] = sigmaV.T
    obsMap = np.transpose(obsMap,axes=(2,1,0))
    dimMap = mask.shape

    # Variable dimensions
    # NOTE(review): scipy.io.netcdf writes classic NetCDF-3 files only;
    # confirm that is what Hazel-MPI expects.
    fileID = netcdf.netcdf_file(outputFile, 'w')
    nPixDim = fileID.createDimension('npixel', nPixel)
    nColDim = fileID.createDimension('ncolumns', nCols)
    nStokesParDim = fileID.createDimension('nstokes_par', 4)
    # NOTE(review): 'nparameters' is hard-coded to 9, while the docstring
    # above says pars may carry 8, 10 or 14 parameters -- confirm against
    # the Hazel-MPI reader before relying on this.
    nParsDim = fileID.createDimension('nparameters', 9)
    nLambdaDim = fileID.createDimension('nlambda', nLambda)
    nXDim = fileID.createDimension('nx', dimMap[0])
    nYDim = fileID.createDimension('ny', dimMap[1])

    # Variable definition
    # Remember that variables are written in C format, so that they are reversed with respect to Fortran
    lambdaID = fileID.createVariable('lambda','f8',('nlambda',))
    stokesID = fileID.createVariable('map','f8',('npixel','nlambda','ncolumns',))
    boundaryID = fileID.createVariable('boundary','f8',('npixel','nstokes_par',))
    heightID = fileID.createVariable('height','f8',('npixel',))
    obsThetaID = fileID.createVariable('obs_theta','f8',('npixel',))
    obsGammaID = fileID.createVariable('obs_gamma','f8',('npixel',))
    maskID = fileID.createVariable('mask','i2',('nx','ny',))
    parsInitID = fileID.createVariable('pars_initial','f8',('npixel','nparameters',))
    lambdaID[:] = wavelength
    # NOTE(review): boundary and pars are documented as [npixel, ...] but
    # their transposes are written into ('npixel', ...) variables -- one of
    # the two conventions is flipped; verify against the callers.
    stokesID[:] = obsMap
    boundaryID[:] = boundary.T
    heightID[:] = height
    obsThetaID[:] = obsTheta
    obsGammaID[:] = obsGamma
    maskID[:] = mask
    parsInitID[:] = pars.T
    fileID.close()
|
aasensio/pyAndres
|
genNetCDF.py
|
Python
|
mit
| 3,845
|
[
"NetCDF"
] |
81109225fc069d104d2a24448afdce4d8deee700c53ae44ff8f782b6d9b58905
|
"""Install Fall3d-6.2 on Linux platform
An environment variable, FALL3DHOME, may be specified to determine
the location of the Fall3d installation.
If it is not specified, Fall3d will be installed within the AIM source tree.
"""
#----------------------------------------
# Auxiliary modules and functions
#----------------------------------------
import os
import sys
from utilities import makedir, run, header, get_shell, set_bash_variable, pipe
from config import update_marker, compiler, modules, makefile_content
from config import make_configuration_filename, make_configuration_content
from config import fall3d_distro, url, tarball
def replace_string_in_file(filename, s1, s2, verbose=False):
    """Replace every occurrence of string s1 with string s2 in filename.

    The file is rewritten in place.  Note that trailing whitespace is
    stripped from every line (changed or not) and a single newline is
    appended, so the file is also whitespace-normalised as a side effect.

    @param filename: name of the file to patch in place
    @param s1: substring to search for
    @param s2: replacement substring
    @param verbose: if True, report each line that was changed
    """
    # Read data from filename.  Context managers guarantee the handles
    # are closed even if reading or writing fails part-way (the original
    # code leaked the open handle on error).
    with open(filename) as infile:
        lines = infile.readlines()

    # Replace and store updated versions
    with open(filename, 'w') as outfile:
        for s in lines:
            new_string = s.replace(s1, s2).rstrip()
            if new_string.strip() != s.strip() and verbose:
                # Parenthesised single-expression print is valid in both
                # Python 2 and Python 3 (the original bare print statement
                # was Python 2 only).
                print('Replaced %s with %s' % (s, new_string))
            outfile.write(new_string + '\n')
#def set_compiler(filename):
# replace_string_in_file(filename, 'FC= ifort', '#FC= ifort')
# replace_string_in_file(filename, 'LINKER= ifort', '#LINKER= ifort')
# replace_string_in_file(filename, 'FFLAGS= -132', '#FFLAGS= -132')
# replace_string_in_file(filename, 'LINKFLAGS= -132', '#LINKFLAGS= -132')
#
# replace_string_in_file(filename, '#FC= gfortran', 'FC= gfortran')
# replace_string_in_file(filename, '#LINKER= gfortran', 'LINKER= gfortran')
# replace_string_in_file(filename, '#FFLAGS= -ffixed', 'FFLAGS= -ffixed')
# replace_string_in_file(filename, '#LINKFLAGS= -ffixed', 'LINKFLAGS= -ffixed')
if __name__ == '__main__':

    # Installer flow:
    #   1. verify build and runtime prerequisites,
    #   2. set up environment variables (optionally editing ~/.bashrc),
    #   3. download and unpack the Fall3d distribution,
    #   4. generate makefiles and compile Fall3d plus helper programs,
    #   5. patch the distributed scripts to use the local install path.
    # NOTE: this is Python 2 code (print statements, raw_input, has_key).

    #----------------------------------------
    # Check that gfortran compiler is present
    #----------------------------------------
    err = os.system('gfortran -v 2> /dev/null')
    if err != 0:
        msg = 'Compiler gfortran must be present\n'
        msg += 'On Ubuntu/Debian systems this can be done as follows\n'
        msg += 'sudo apt-get install gfortran'
        raise Exception(msg)

    #------------------------------------------
    # Check that Python and friends are present
    #------------------------------------------
    err = os.system('python -V 2> /dev/null')
    if err != 0:
        msg = 'Python must be present\n'
        msg += 'On Ubuntu/Debian systems this can be done as follows\n'
        msg += 'sudo apt-get install python'
        raise Exception(msg)

    err = os.system('python -c "import numpy" 2> /dev/null')
    if err != 0:
        msg = 'Python module numpy must be present\n'
        msg += 'On Ubuntu/Debian systems this can be done as follows\n'
        msg += 'sudo apt-get install python-numpy'
        raise Exception(msg)

    err = os.system('python -c "import osgeo" 2> /dev/null')
    if err != 0:
        msg = 'Python module python-gdal must be present\n'
        msg += 'On Ubuntu/Debian systems this can be done as follows\n'
        msg += 'sudo apt-get install python-gdal'
        raise Exception(msg)

    err = os.system('gdalinfo --help-general > /dev/null 2> /dev/null')
    if err != 0:
        msg = 'GDAL must be present\n'
        msg += 'On Ubuntu/Debian systems this can be done as follows\n'
        msg += 'sudo apt-get install gdal-bin'
        raise Exception(msg)

    if not os.path.isfile('/usr/include/netcdf.mod'):
        msg = 'The NetCDF library must be present\n'
        msg += 'On Ubuntu/Debian systems this can be done as follows\n'
        msg += 'sudo apt-get install libnetcdf-dev'
        raise Exception(msg)

    err = os.system('python -c "import Scientific.IO.NetCDF" 2> /dev/null')
    if err != 0:
        msg = 'Python module python-scientific must be present\n'
        msg += 'On Ubuntu/Debian systems this can be done as follows\n'
        msg += 'sudo apt-get install python-scientific'
        raise Exception(msg)

    #--------------------------------------------------------------------
    # Determine AIMHOME - this is what needs to be assigned to PYTHONPATH
    #--------------------------------------------------------------------
    #cwd = os.getcwd() # getcwd follow symlinks, so it is better to use pwd
    p = pipe('pwd')
    cwd = p.stdout.read().strip()
    AIMHOME = os.path.split(cwd)[0] # Parent dir
    print 'AIMHOME determined to be', AIMHOME

    #-----------------------------
    # Verify environment variables
    #-----------------------------
    modified = False
    ok_to_modify = False
    askuser = True
    for envvar in ['FALL3DHOME', 'PYTHONPATH', 'TEPHRADATA']:
        variable_set = False
        if os.environ.has_key(envvar):
            # In case of PYTHONPATH check that it is correct
            if envvar == 'PYTHONPATH':
                pythonpath = os.environ['PYTHONPATH']
                paths = pythonpath.split(':')
                for path in paths:
                    try:
                        files = os.listdir(path)
                    except:
                        pass
                    else:
                        if 'aim' in files:
                            files = os.listdir(os.path.join(path, 'aim'))
                            if __file__ in files:
                                # PYTHONPATH points at the directory holding
                                # the aim package containing this installer.
                                variable_set = True
            else:
                # Otherwise, just verify that it has been set.
                value = os.environ[envvar]
                if value:
                    variable_set = True

        if not variable_set:
            print 'Environment variable %s has not been set' % envvar

            # If we are using the bash shell ask for permission to modify .bashrc
            if get_shell() == 'bash':
                if not ok_to_modify and askuser:
                    answer = raw_input('Would you like me to update your .bashrc file with reasonable default values? (Y,N)[Y]')
                    askuser = False # Don't ask again
                    if answer.lower() == 'n':
                        print 'OK - you may want to set this variable later'
                        ok_to_modify = False
                    else:
                        print 'OK - updated lines in ~/.bashrc will be marked with %s' % update_marker
                        ok_to_modify = True
                        print

                if ok_to_modify:
                    if envvar == 'PYTHONPATH':
                        # We already know what it should be
                        envvalue = AIMHOME
                    elif envvar == 'FALL3DHOME':
                        # Use ~/fall3d as default
                        envvalue = os.path.expanduser('~/fall3d')
                    elif envvar == 'TEPHRADATA':
                        if os.path.isdir('/model_area'):
                            # Use /model_area/tephra as default if possible
                            makedir('/model_area/tephra')
                            envvalue = '/model_area/tephra'
                        else:
                            # Otherwise use ~/tephra as default
                            envvalue = os.path.expanduser('~/tephra')

                    # Modify .bashrc
                    print 'Setting environment variable %s to %s' % (envvar, envvalue)
                    set_bash_variable(envvar, envvalue)
                    modified = True

                    # Also assign variables for the rest of this session
                    os.environ[envvar] = envvalue
    print

    if modified:
        print 'Bash configuration file ~/.bashrc has been modified'
        print 'You can change it manually if you wish.'
        print

    #---------------------
    # Determine FALL3DHOME
    #---------------------
    if 'FALL3DHOME' in os.environ:
        FALL3DHOME = os.environ['FALL3DHOME']
    else:
        FALL3DHOME = os.getcwd()

    header('Fall3d will be installed in %s' % FALL3DHOME)
    makedir(FALL3DHOME)
    os.chdir(FALL3DHOME)

    #----------------
    # Download Fall3d version 6 (public version)
    # http://www.bsc.es/projects/earthscience/fall3d/Downloads/Fall3d-PUB.tar.gz
    #----------------
    path = os.path.join(url, tarball)
    if not os.path.isfile(tarball):
        # FIXME: Should also check integrity of tgz file.
        cmd = 'wget ' + path
        run(cmd, verbose=True)

    #----------------------------------------
    # Start installation procedure in earnest
    #----------------------------------------

    # Cleanup: remove any previous unpacked distribution first.
    s = '/bin/rm -rf %s' % fall3d_distro
    run(s, verbose=False)

    print 'Unpacking tarball: %s/%s' % (FALL3DHOME, tarball)
    print

    # Unpack FALL3D using tar:
    #
    # x: Extract
    # v: Be verbose
    # f: Filename coming up
    # z: Uncompress as well
    #
    err = run('tar xvfz %s > /dev/null' % tarball, verbose=True)
    if err != 0:
        msg = 'Could not unpack %s' % tarball
        raise Exception(msg)

    # Get origin directory
    os.chdir(fall3d_distro)
    fall3dpath = os.getcwd()

    #----------
    # Makefiles
    #----------

    # Generate common makefile configuration
    make_configuration = os.path.join(fall3dpath, 'Install', make_configuration_filename)
    fid = open(make_configuration, 'w')
    fid.write(make_configuration_content % (compiler, fall3dpath, fall3dpath) )
    fid.close()

    # Generate and run specific makefiles
    for program in ['LibMaster', 'MergeNCEP1', 'SetDbs', 'SetGrn', 'SetSrc', 'Sources_ser']:
        mod = modules[program]
        if mod.file is None:
            # Generate standard makefile
            fid = open(os.path.join(mod.path, 'Makefile'), 'w')
            fid.write(makefile_content % (fall3dpath, make_configuration_filename, mod.mods, mod.objs, mod.prog))
            fid.close()
        else:
            # Use predefined makefile
            makefile = os.path.join(mod.path, 'Makefile')
            s = 'cp %s %s' % (os.path.join(cwd, mod.file), makefile)
            print 'CMD', s
            run(s, verbose=False)

            # Patch include statement
            replace_string_in_file(makefile,
                                   'include <insert config>',
                                   'include %s' % make_configuration,
                                   verbose=False)

        sys.stdout.write('Compiling %s: ' % program)
        run('cd %s; make' % mod.path,
            stdout=os.path.join(cwd, 'make_%s.stdout' % program),
            stderr=os.path.join(cwd, 'make_%s.stderr' % program),
            verbose=False)

        #-----------------------------
        # Test presence of executables
        #-----------------------------
        p = mod.path.split(os.sep)
        # Strip last dir off path as that is where makefiles put targets
        if len(p) > 1:
            p = os.path.join(*p[:-1])
        else:
            p = ''
        f = os.path.join(fall3dpath, p, mod.prog)
        if os.path.isfile(f):
            res = 'OK'
        else:
            res = 'FAILED'
        print('%s' % res)

    #----------------------------------------------------------------------------
    # Compile and install post-processing source code which is not part of Fall3d
    #----------------------------------------------------------------------------

    # Generate and run specific makefiles
    for program in ['nc2grd', 'nc2prof', 'HazardMaps']:
        mod = modules[program]

        # Move sources into Fall3d structure
        origin_dir = os.path.join(cwd, mod.path)
        target_dir = os.path.join(FALL3DHOME, fall3d_distro, 'Utilities', program)
        source_dir = os.path.join(target_dir, 'Sources')
        makedir(source_dir)
        run('cp %s/* %s' % (origin_dir, source_dir), verbose=False)
        if mod.file is None:
            # Generate standard makefile
            fid = open(os.path.join(source_dir, 'Makefile'), 'w')
            fid.write(makefile_content % (fall3dpath, make_configuration_filename, mod.mods, mod.objs, mod.prog))
            fid.close()

        sys.stdout.write('Compiling %s: ' % program)
        run('cd %s; make' % source_dir,
            stdout=os.path.join(cwd, 'make_%s.stdout' % program),
            stderr=os.path.join(cwd, 'make_%s.stderr' % program),
            verbose=False)

        #-----------------------------
        # Test presence of executables
        #-----------------------------
        f = os.path.join(target_dir, mod.prog)
        if os.path.isfile(f):
            res = 'OK'
        else:
            res = 'FAILED'
        print('%s' % res)

    #--------------------------------------------------------
    # Patch the Fall3d scripts to remove hardwired references (FIXME: unnecessary)
    #--------------------------------------------------------
    os.chdir(os.path.join(FALL3DHOME, fall3d_distro, 'Scripts'))
    for program in ['SetDbs', 'SetGrn', 'SetSrc', 'manager', 'Fall3d_Pub']:
        # Patch include statement
        replace_string_in_file('Script-' + program,
                               'set HOME=/Users/arnaufolch/Documents/Software/Fall3d-6.0/PUB/Fall3d-6.2-PUB',
                               'set HOME=%s' % os.path.join(FALL3DHOME, fall3d_distro),
                               verbose=False)

    #header('Test the installation and try the examples')
    #print 'To test the installation, go to %s and run' % os.path.join(AIMHOME,
    # 'testing')
    #print 'python test_all.py'
    #print
    #print 'You can also run the provided examples individually, e.g.'
    #print
    #print 'python tambora.py'
    #print
    #print 'and check the results in tambora_output'
|
GeoscienceAustralia/PF3D
|
source/aim/install_fall3d.py
|
Python
|
gpl-3.0
| 14,568
|
[
"NetCDF"
] |
95e3eb9e213c20b9bbd63d8dd5dea1f4518d5f1c90b68d68253e3c91f24969e1
|
# -*- coding: utf-8 -*-
# * Authors:
# * TJEBBES Gaston <g.t@majerti.fr>
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
"""
User and user datas listing views
"""
import logging
from sqlalchemy import (
or_,
distinct,
)
from autonomie_base.models.base import DBSESSION
from autonomie.models.company import (
Company,
CompanyActivity,
)
from autonomie.models.user.user import User
from autonomie.models.user.login import Login
from autonomie.forms.user.user import get_list_schema
from autonomie.models.user.group import Group
from autonomie.views import BaseListView
logger = logging.getLogger(__name__)
class BaseUserListView(BaseListView):
    """
    Base list view for the User model.

    Provides:

        * the base ``User`` query (distinct ids, outer-joined on companies)
        * filtering on the free-text search field
        * filtering on the company ``activity_id``
        * filtering on the login's group

    Subclasses add ``filter_*`` methods to build more specific list views
    (e.g: trainers, users with account ...).
    """
    title = u"Tous les comptes"
    schema = None
    # The columns that allow sorting
    sort_columns = dict(
        name=User.lastname,
        email=User.email,
    )

    def query(self):
        """
        Return the main query for our list view
        """
        logger.debug("Queryiing")
        main_query = DBSESSION().query(distinct(User.id), User)
        main_query = main_query.outerjoin(User.companies)
        return main_query

    def filter_name_search(self, query, appstruct):
        """
        filter the query with the provided search argument
        """
        logger.debug("Filtering name")
        search = appstruct['search']
        if not search:
            return query
        # one pattern shared by every LIKE clause below
        pattern = "%" + search + "%"
        return query.filter(
            or_(
                User.lastname.like(pattern),
                User.firstname.like(pattern),
                User.companies.any(Company.name.like(pattern)),
                User.companies.any(Company.goal.like(pattern)),
                User.login.has(Login.login.like(pattern)),
            )
        )

    def filter_activity_id(self, query, appstruct):
        """
        filter the query with company activities
        """
        logger.debug("Filtering by activity id")
        logger.debug(appstruct)
        activity_id = appstruct.get('activity_id')
        if activity_id:
            activity_clause = User.companies.any(
                Company.activities.any(CompanyActivity.id == activity_id)
            )
            query = query.filter(activity_clause)
        logger.debug(query)
        return query

    def filter_user_group(self, query, appstruct):
        # Restrict to users whose login belongs to the requested group
        group_id = appstruct.get('group_id')
        if group_id:
            query = query.filter(
                User.login.has(Login.groups.any(Group.id == group_id))
            )
        return query
class GeneralAccountList(BaseUserListView):
    """
    List the User models that have a Login attached to them.

    Allows to filter on:

        * active/unactive Login
        * company name or User lastname/firstname
        * company activity

    Sort on:

        * User.lastname
        * User.email
    """
    title = u"Annuaire des utilisateurs"
    # The schema used to validate our search/filter form
    schema = get_list_schema()
    # The columns that allow sorting
    sort_columns = dict(
        name=User.lastname,
        email=User.email,
    )

    def filter_login_filter(self, query, appstruct):
        """
        Filter the list on accounts with login only
        """
        result = query.join(User.login)
        status = appstruct.get('login_filter', 'active_login')
        logger.debug("Filtering login : %s" % status)
        if status == 'active_login':
            logger.debug("Adding a filter on Login.active")
            result = result.filter(Login.active == True)
        elif status == "unactive_login":
            result = result.filter(Login.active == False)
        return result
class GeneralUserList(BaseListView):
    """
    List the users that have a Login.

    Allows to search for companies or user name, to filter on the login's
    active flag, on company activity and on group membership.
    Sorting is allowed on names and emails.
    """
    title = u"Annuaire des utilisateurs"
    # The schema used to validate our search/filter form
    schema = get_list_schema()
    # The columns that allow sorting
    sort_columns = dict(
        name=User.lastname,
        email=User.email,
    )

    def query(self):
        """
        Return the main query for our list view
        """
        logger.debug("Queryiing")
        query = DBSESSION().query(distinct(User.id), User)
        # inner join: only users that actually have a login
        query = query.join(User.login)
        return query.outerjoin(User.companies)

    def filter_name_search(self, query, appstruct):
        """
        filter the query with the provided search argument
        """
        logger.debug("Filtering name")
        search = appstruct['search']
        if search:
            query = query.filter(
                or_(
                    User.lastname.like("%" + search + "%"),
                    User.firstname.like("%" + search + "%"),
                    User.companies.any(Company.name.like("%" + search + "%")),
                    User.companies.any(Company.goal.like("%" + search + "%"))
                )
            )
        return query

    def filter_activity_id(self, query, appstruct):
        """
        filter the query with company activities
        """
        logger.debug("Filtering by activity id")
        logger.debug(appstruct)
        activity_id = appstruct.get('activity_id')
        if activity_id:
            query = query.filter(
                User.companies.any(
                    Company.activities.any(
                        CompanyActivity.id == activity_id
                    )
                )
            )
        logger.debug(query)
        return query

    def filter_active(self, query, appstruct):
        """
        Filter the query on the login's active flag ('Y'/'N', defaults 'Y')
        """
        active = appstruct.get('active', 'Y')
        if active == 'Y':
            query = query.filter(Login.active == True)
        elif active == "N":
            query = query.filter(Login.active == False)
        return query

    def filter_user_group(self, query, appstruct):
        """
        Filter the query on the login's group membership
        """
        group_id = appstruct.get('group_id')
        if group_id:
            # BUGFIX: User.login is a scalar relationship, so it must be
            # queried with .has() (as done everywhere else in this module:
            # see BaseUserListView), not .any(), which only applies to
            # collection relationships and raises an InvalidRequestError
            # when the query is built.
            query = query.filter(
                User.login.has(
                    Login.groups.any(
                        Group.id == group_id
                    )
                )
            )
        return query
def includeme(config):
    """
    Pyramid module entry point

    :param obj config: The pyramid configuration object
    """
    # Register the account directory view on the /users route
    view_settings = dict(
        route_name='/users',
        renderer='/user/lists.mako',
        permission='visit',
    )
    config.add_view(GeneralAccountList, **view_settings)
|
CroissanceCommune/autonomie
|
autonomie/views/user/lists.py
|
Python
|
gpl-3.0
| 6,931
|
[
"VisIt"
] |
93e3ea7406b1aab5db594349d8d622ff58291446e64b2e7b6642a19bea12b5d7
|
# Copyright (C) 2018 Charlie Hoy, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian and
allows for the likelihood to be marginalized over phase and/or time and/or
distance.
"""
import itertools
import numpy
from scipy import special
from pycbc.waveform import generator
from pycbc.waveform import (NoWaveformError, FailedWaveformError)
from pycbc.detector import Detector
from .gaussian_noise import (BaseGaussianNoise,
create_waveform_generator,
GaussianNoise)
from .tools import marginalize_likelihood, DistMarg
class MarginalizedPhaseGaussianNoise(GaussianNoise):
    r"""The likelihood is analytically marginalized over phase.

    This class can be used with signal models that can be written as:

    .. math::

        \tilde{h}(f; \Theta, \phi) = A(f; \Theta)e^{i\Psi(f; \Theta) + i \phi},

    where :math:`\phi` is an arbitrary phase constant. This phase constant
    can be analytically marginalized over with a uniform prior as follows:
    assuming the noise is stationary and Gaussian (see `GaussianNoise`
    for details), the posterior is:

    .. math::

        p(\Theta,\phi|d)
            &\propto p(\Theta)p(\phi)p(d|\Theta,\phi) \\
            &\propto p(\Theta)\frac{1}{2\pi}\exp\left[
                -\frac{1}{2}\sum_{i}^{N_D} \left<
                    h_i(\Theta,\phi) - d_i, h_i(\Theta,\phi) - d_i
                \right>\right].

    Here, the sum is over the number of detectors :math:`N_D`, :math:`d_i`
    and :math:`h_i` are the data and signal in detector :math:`i`,
    respectively, and we have assumed a uniform prior on :math:`phi \in [0,
    2\pi)`. With the form of the signal model given above, the inner product
    in the exponent can be written as:

    .. math::

        -\frac{1}{2}\left<h_i - d_i, h_i- d_i\right>
            &= \left<h_i, d_i\right> -
               \frac{1}{2}\left<h_i, h_i\right> -
               \frac{1}{2}\left<d_i, d_i\right> \\
            &= \Re\left\{O(h^0_i, d_i)e^{-i\phi}\right\} -
               \frac{1}{2}\left<h^0_i, h^0_i\right> -
               \frac{1}{2}\left<d_i, d_i\right>,

    where:

    .. math::

        h_i^0 &\equiv \tilde{h}_i(f; \Theta, \phi=0); \\
        O(h^0_i, d_i) &\equiv 4 \int_0^\infty
            \frac{\tilde{h}_i^*(f; \Theta,0)\tilde{d}_i(f)}{S_n(f)}\mathrm{d}f.

    Gathering all of the terms that are not dependent on :math:`\phi`
    together:

    .. math::

        \alpha(\Theta, d) \equiv \exp\left[-\frac{1}{2}\sum_i
            \left<h^0_i, h^0_i\right> + \left<d_i, d_i\right>\right],

    we can marginalize the posterior over :math:`\phi`:

    .. math::

        p(\Theta|d)
            &\propto p(\Theta)\alpha(\Theta,d)\frac{1}{2\pi}
                     \int_{0}^{2\pi}\exp\left[\Re \left\{
                         e^{-i\phi} \sum_i O(h^0_i, d_i)
                     \right\}\right]\mathrm{d}\phi \\
            &\propto p(\Theta)\alpha(\Theta, d)\frac{1}{2\pi}
                     \int_{0}^{2\pi}\exp\left[
                         x(\Theta,d)\cos(\phi) + y(\Theta, d)\sin(\phi)
                     \right]\mathrm{d}\phi.

    The integral in the last line is equal to
    :math:`2\pi I_0(\sqrt{x^2+y^2})`, where :math:`I_0` is the modified
    Bessel function of the first kind. Thus the marginalized log posterior
    is:

    .. math::

        \log p(\Theta|d) \propto \log p(\Theta) +
            I_0\left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
            \frac{1}{2}\sum_i\left[ \left<h^0_i, h^0_i\right> -
                                    \left<d_i, d_i\right> \right]
    """
    name = 'marginalized_phase'

    def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
                 high_frequency_cutoff=None, normalize=False,
                 static_params=None, **kwargs):
        # set up the boiler-plate attributes
        super(MarginalizedPhaseGaussianNoise, self).__init__(
            variable_params, data, low_frequency_cutoff, psds=psds,
            high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
            static_params=static_params, **kwargs)

    @property
    def _extra_stats(self):
        """Adds ``loglr``, plus ``cplx_loglr`` and ``optimal_snrsq`` in each
        detector."""
        return ['loglr', 'maxl_phase'] + \
               ['{}_optimal_snrsq'.format(det) for det in self._data]

    def _nowaveform_loglr(self):
        """Convenience function to set loglr values if no waveform generated.
        """
        # NOTE(review): this sets 'loglikelihood' whereas ``_extra_stats``
        # tracks 'loglr' (and the sibling classes set 'loglr' here) —
        # confirm which attribute downstream consumers read on failure.
        setattr(self._current_stats, 'loglikelihood', -numpy.inf)
        # maxl phase doesn't exist, so set it to nan
        setattr(self._current_stats, 'maxl_phase', numpy.nan)
        for det in self._data:
            # snr can't be < 0 by definition, so return 0
            setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.)
        return -numpy.inf

    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) =
                I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
                \frac{1}{2}\left<h^0_i, h^0_i\right>,

        at the current point in parameter space :math:`\Theta`.

        Returns
        -------
        float
            The value of the log likelihood ratio evaluated at the given
            point.
        """
        params = self.current_params
        try:
            if self.all_ifodata_same_rate_length:
                # one generator serves every detector
                wfs = self.waveform_generator.generate(**params)
            else:
                # per-detector generators (different rates/lengths)
                wfs = {}
                for det in self.data:
                    wfs.update(self.waveform_generator[det].generate(**params))
        except NoWaveformError:
            return self._nowaveform_loglr()
        except FailedWaveformError as e:
            if self.ignore_failed_waveforms:
                return self._nowaveform_loglr()
            else:
                raise e
        # accumulate <h, h> (real) and <h, d> (complex) over detectors
        hh = 0.
        hd = 0j
        for det, h in wfs.items():
            # the kmax of the waveforms may be different than internal kmax
            kmax = min(len(h), self._kmax[det])
            if self._kmin[det] >= kmax:
                # if the waveform terminates before the filtering low frequency
                # cutoff, then the loglr is just 0 for this detector
                hh_i = 0.
                hd_i = 0j
            else:
                # whiten the waveform (in place)
                h[self._kmin[det]:kmax] *= \
                    self._weight[det][self._kmin[det]:kmax]
                # calculate inner products
                hh_i = h[self._kmin[det]:kmax].inner(
                    h[self._kmin[det]:kmax]).real
                hd_i = self._whitened_data[det][self._kmin[det]:kmax].inner(
                    h[self._kmin[det]:kmax])
            # store the per-detector optimal SNR^2
            setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
            hh += hh_i
            hd += hd_i
        # the phase of the summed overlap is the maximum-likelihood phase
        self._current_stats.maxl_phase = numpy.angle(hd)
        return marginalize_likelihood(hd, hh, phase=True)
class MarginalizedPolarization(BaseGaussianNoise, DistMarg):
    r""" This likelihood numerically marginalizes over polarization angle.

    This class implements the Gaussian likelihood with an explicit numerical
    marginalization over polarization angle. This is accomplished using
    a fixed set of integration points distributed uniformly between
    0 and 2pi. By default, 1000 integration points are used.
    The 'polarization_samples' argument can be passed to set an alternate
    number of integration points.
    """
    name = 'marginalized_polarization'

    def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
                 high_frequency_cutoff=None, normalize=False,
                 polarization_samples=1000, **kwargs):
        # let DistMarg strip out distance-marginalization settings first
        variable_params, kwargs = self.setup_distance_marginalization(
            variable_params, **kwargs)
        # set up the boiler-plate attributes
        super(MarginalizedPolarization, self).__init__(
            variable_params, data, low_frequency_cutoff, psds=psds,
            high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
            **kwargs)
        # Determine if all data have the same sampling rate and segment length
        # NOTE(review): ``kwargs`` must carry a 'static_params' entry for the
        # generator calls below — presumably guaranteed by the base class /
        # setup_distance_marginalization; confirm.
        if self.all_ifodata_same_rate_length:
            # create a waveform generator for all ifos
            self.waveform_generator = create_waveform_generator(
                self.variable_params, self.data,
                waveform_transforms=self.waveform_transforms,
                recalibration=self.recalibration,
                generator_class=generator.FDomainDetFrameTwoPolGenerator,
                gates=self.gates, **kwargs['static_params'])
        else:
            # create a waveform generator for each ifo respectively
            self.waveform_generator = {}
            for det in self.data:
                self.waveform_generator[det] = create_waveform_generator(
                    self.variable_params, {det: self.data[det]},
                    waveform_transforms=self.waveform_transforms,
                    recalibration=self.recalibration,
                    generator_class=generator.FDomainDetFrameTwoPolGenerator,
                    gates=self.gates, **kwargs['static_params'])
        self.polarization_samples = int(polarization_samples)
        # fixed grid of polarization angles used for the marginalization
        self.pol = numpy.linspace(0, 2*numpy.pi, self.polarization_samples)
        # cache of Detector instances, keyed by detector name
        self.dets = {}

    @property
    def _extra_stats(self):
        """Adds ``loglr``, ``maxl_polarization``, and the ``optimal_snrsq`` in
        each detector.
        """
        return ['loglr', 'maxl_polarization', 'maxl_loglr'] + \
               ['{}_optimal_snrsq'.format(det) for det in self._data]

    def _nowaveform_loglr(self):
        """Convenience function to set loglr values if no waveform generated.
        """
        setattr(self._current_stats, 'loglr', -numpy.inf)
        # maxl polarization doesn't exist, so set it to nan
        setattr(self._current_stats, 'maxl_polarization', numpy.nan)
        for det in self._data:
            # snr can't be < 0 by definition, so return 0
            setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.)
        return -numpy.inf

    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \sum_i
                \left<h_i(\Theta)|d_i\right> -
                \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,

        at the current parameter values :math:`\Theta`, numerically
        marginalized over the polarization grid ``self.pol``.

        Returns
        -------
        float
            The value of the log likelihood ratio.
        """
        params = self.current_params
        try:
            if self.all_ifodata_same_rate_length:
                wfs = self.waveform_generator.generate(**params)
            else:
                wfs = {}
                for det in self.data:
                    wfs.update(self.waveform_generator[det].generate(**params))
        except NoWaveformError:
            return self._nowaveform_loglr()
        except FailedWaveformError as e:
            if self.ignore_failed_waveforms:
                return self._nowaveform_loglr()
            else:
                raise e
        # sh_total / hh_total are vectors over the polarization grid
        lr = sh_total = hh_total = 0.
        for det, (hp, hc) in wfs.items():
            if det not in self.dets:
                self.dets[det] = Detector(det)
            # antenna patterns evaluated at every polarization grid point
            fp, fc = self.dets[det].antenna_pattern(self.current_params['ra'],
                                                    self.current_params['dec'],
                                                    self.pol,
                                                    self.current_params['tc'])
            # the kmax of the waveforms may be different than internal kmax
            kmax = min(max(len(hp), len(hc)), self._kmax[det])
            slc = slice(self._kmin[det], kmax)
            # whiten both polarizations (in place)
            hp[self._kmin[det]:kmax] *= self._weight[det][slc]
            hc[self._kmin[det]:kmax] *= self._weight[det][slc]
            # h = fp * hp + hc * hc
            # <h, d> = fp * <hp,d> + fc * <hc,d>
            # the inner products
            cplx_hpd = self._whitened_data[det][slc].inner(hp[slc])  # <hp, d>
            cplx_hcd = self._whitened_data[det][slc].inner(hc[slc])  # <hc, d>
            cplx_hd = fp * cplx_hpd + fc * cplx_hcd
            # <h, h> = <fp * hp + fc * hc, fp * hp + fc * hc>
            #        = Real(fpfp * <hp,hp> + fcfc * <hc,hc> + \
            #          fphc * (<hp, hc> + <hc, hp>))
            hphp = hp[slc].inner(hp[slc]).real  # < hp, hp>
            hchc = hc[slc].inner(hc[slc]).real  # <hc, hc>
            # Below could be combined, but too tired to figure out
            # if there should be a sign applied if so
            hphc = hp[slc].inner(hc[slc]).real  # <hp, hc>
            hchp = hc[slc].inner(hp[slc]).real  # <hc, hp>
            hh = fp * fp * hphp + fc * fc * hchc + fp * fc * (hphc + hchp)
            # store the per-detector optimal SNR^2 (still a grid vector here)
            setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh)
            sh_total += cplx_hd
            hh_total += hh
        # distance marginalization keeps the polarization grid intact
        lr = self.marginalize_loglr(sh_total, hh_total, skip_vector=True)
        # average over the uniform polarization grid in log space
        lr_total = special.logsumexp(lr) - numpy.log(len(self.pol))
        # store the maxl polarization
        idx = lr.argmax()
        setattr(self._current_stats, 'maxl_polarization', self.pol[idx])
        setattr(self._current_stats, 'maxl_loglr', lr[idx])
        # just store the maxl optimal snrsq
        for det in wfs:
            p = '{}_optimal_snrsq'.format(det)
            setattr(self._current_stats, p,
                    getattr(self._current_stats, p)[idx])
        return float(lr_total)
class MarginalizedHMPolPhase(BaseGaussianNoise):
    r"""Numerically marginalizes waveforms with higher modes over polarization
    `and` phase.

    This class implements the Gaussian likelihood with an explicit numerical
    marginalization over polarization angle and orbital phase. This is
    accomplished using a fixed set of integration points distributed uniformly
    between 0 and 2:math:`\pi` for both the polarization and phase. By
    default, 100 integration points are used for each parameter, giving
    :math:`10^4` evaluation points in total. This can be modified using the
    ``polarization_samples`` and ``coa_phase_samples`` arguments.

    This only works with waveforms that return separate spherical harmonic
    modes for each waveform. For a list of currently supported approximants,
    see :py:func:`pycbc.waveform.waveform_modes.fd_waveform_mode_approximants`
    and :py:func:`pycbc.waveform.waveform_modes.td_waveform_mode_approximants`.

    Parameters
    ----------
    variable_params : (tuple of) string(s)
        A tuple of parameter names that will be varied.
    data : dict
        A dictionary of data, in which the keys are the detector names and the
        values are the data (assumed to be unwhitened). All data must have the
        same frequency resolution.
    low_frequency_cutoff : dict
        A dictionary of starting frequencies, in which the keys are the
        detector names and the values are the starting frequencies for the
        respective detectors to be used for computing inner products.
    psds : dict, optional
        A dictionary of FrequencySeries keyed by the detector names. The
        dictionary must have a psd for each detector specified in the data
        dictionary. If provided, the inner products in each detector will be
        weighted by 1/psd of that detector.
    high_frequency_cutoff : dict, optional
        A dictionary of ending frequencies, in which the keys are the
        detector names and the values are the ending frequencies for the
        respective detectors to be used for computing inner products. If not
        provided, the minimum of the largest frequency stored in the data
        and a given waveform will be used.
    normalize : bool, optional
        If True, the normalization factor :math:`alpha` will be included in
        the log likelihood. See :py:class:`GaussianNoise` for details.
        Default is to not include it.
    polarization_samples : int, optional
        How many points to use in polarization. Default is 100.
    coa_phase_samples : int, optional
        How many points to use in phase. Defaults is 100.
    \**kwargs :
        All other keyword arguments are passed to
        :py:class:`gaussian_noise.BaseGaussianNoisei <BaseGaussianNoise>`.
    """
    name = 'marginalized_hmpolphase'

    def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
                 high_frequency_cutoff=None, normalize=False,
                 polarization_samples=100,
                 coa_phase_samples=100,
                 static_params=None, **kwargs):
        # set up the boiler-plate attributes
        super(MarginalizedHMPolPhase, self).__init__(
            variable_params, data, low_frequency_cutoff, psds=psds,
            high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
            static_params=static_params, **kwargs)
        # create the waveform generator
        self.waveform_generator = create_waveform_generator(
            self.variable_params, self.data,
            waveform_transforms=self.waveform_transforms,
            recalibration=self.recalibration,
            generator_class=generator.FDomainDetFrameModesGenerator,
            gates=self.gates, **self.static_params)
        pol = numpy.linspace(0, 2*numpy.pi, polarization_samples)
        phase = numpy.linspace(0, 2*numpy.pi, coa_phase_samples)
        # remap to every combination of the parameters
        # this gets every combination by mapping them to an NxM grid
        # one needs to be transposed so that they run along opposite
        # dimensions
        n = coa_phase_samples * polarization_samples
        self.nsamples = n
        self.pol = numpy.resize(pol, n)
        phase = numpy.resize(phase, n)
        phase = phase.reshape(coa_phase_samples, polarization_samples)
        self.phase = phase.T.flatten()
        # cache of exp(i*m*phase) arrays, keyed by m (see phase_fac)
        self._phase_fac = {}
        # cache of Detector instances, keyed by detector name
        self.dets = {}

    def phase_fac(self, m):
        r"""The phase :math:`\exp[i m \phi]`."""
        try:
            return self._phase_fac[m]
        except KeyError:
            # hasn't been computed yet, calculate it
            self._phase_fac[m] = numpy.exp(1.0j * m * self.phase)
            return self._phase_fac[m]

    @property
    def _extra_stats(self):
        """Adds ``maxl_polarization`` and the ``maxl_phase``
        """
        return ['maxl_polarization', 'maxl_phase', ]

    def _nowaveform_loglr(self):
        """Convenience function to set loglr values if no waveform generated.
        """
        # maxl phase doesn't exist, so set it to nan
        setattr(self._current_stats, 'maxl_polarization', numpy.nan)
        setattr(self._current_stats, 'maxl_phase', numpy.nan)
        return -numpy.inf

    def _loglr(self, return_unmarginalized=False):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \sum_i
                \left<h_i(\Theta)|d_i\right> -
                \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,

        at the current parameter values :math:`\Theta`, numerically
        marginalized over the (polarization, phase) grid unless
        ``return_unmarginalized`` is True.

        Returns
        -------
        float
            The value of the log likelihood ratio.
        """
        params = self.current_params
        try:
            wfs = self.waveform_generator.generate(**params)
        except NoWaveformError:
            return self._nowaveform_loglr()
        except FailedWaveformError as e:
            if self.ignore_failed_waveforms:
                return self._nowaveform_loglr()
            else:
                raise e
        # ---------------------------------------------------------------------
        # Some optimizations not yet taken:
        #  * higher m calculations could have a lot of redundancy
        #  * fp/fc need not be calculated except where polarization is different
        #  * may be possible to simplify this by making smarter use of real/imag
        # ---------------------------------------------------------------------
        lr = 0.
        hds = {}
        hhs = {}
        for det, modes in wfs.items():
            if det not in self.dets:
                self.dets[det] = Detector(det)
            # antenna patterns evaluated at every polarization grid point
            fp, fc = self.dets[det].antenna_pattern(self.current_params['ra'],
                                                    self.current_params['dec'],
                                                    self.pol,
                                                    self.current_params['tc'])
            # loop over modes and prepare the waveform modes
            # we will sum up zetalm = glm <ulm, d> + i glm <vlm, d>
            # over all common m so that we can apply the phase once
            zetas = {}
            rlms = {}
            slms = {}
            for mode in modes:
                l, m = mode
                ulm, vlm = modes[mode]
                # whiten the waveforms
                # the kmax of the waveforms may be different than internal kmax
                kmax = min(max(len(ulm), len(vlm)), self._kmax[det])
                slc = slice(self._kmin[det], kmax)
                ulm[self._kmin[det]:kmax] *= self._weight[det][slc]
                vlm[self._kmin[det]:kmax] *= self._weight[det][slc]
                # the inner products
                # <ulm, d>
                ulmd = ulm[slc].inner(self._whitened_data[det][slc]).real
                # <vlm, d>
                vlmd = vlm[slc].inner(self._whitened_data[det][slc]).real
                # add inclination, and pack into a complex number
                # NOTE(review): third-party import inside the inner loop;
                # Python caches modules so the repeat cost is small, but it
                # would be cleaner hoisted to module level.
                import lal
                glm = lal.SpinWeightedSphericalHarmonic(
                    self.current_params['inclination'], 0, -2, l, m).real
                if m not in zetas:
                    zetas[m] = 0j
                zetas[m] += glm * (ulmd + 1j*vlmd)
                # Get condensed set of the parts of the waveform that only
                # differ by m; this is used next to help calculate <h, h>
                r = glm * ulm
                s = glm * vlm
                if m not in rlms:
                    rlms[m] = r
                    slms[m] = s
                else:
                    rlms[m] += r
                    slms[m] += s
            # now compute all possible <hlm, hlm>
            # NOTE(review): ``slc`` below is the slice left over from the
            # last mode in the loop above; this assumes all modes share the
            # same kmin/kmax span — confirm.
            rr_m = {}
            ss_m = {}
            rs_m = {}
            sr_m = {}
            combos = itertools.combinations_with_replacement(rlms.keys(), 2)
            for m, mprime in combos:
                r = rlms[m]
                s = slms[m]
                rprime = rlms[mprime]
                sprime = slms[mprime]
                rr_m[mprime, m] = r[slc].inner(rprime[slc]).real
                ss_m[mprime, m] = s[slc].inner(sprime[slc]).real
                rs_m[mprime, m] = s[slc].inner(rprime[slc]).real
                sr_m[mprime, m] = r[slc].inner(sprime[slc]).real
                # store the conjugate for easy retrieval later
                rr_m[m, mprime] = rr_m[mprime, m]
                ss_m[m, mprime] = ss_m[mprime, m]
                rs_m[m, mprime] = sr_m[mprime, m]
                sr_m[m, mprime] = rs_m[mprime, m]
            # now apply the phase to all the common ms
            hpd = 0.
            hcd = 0.
            hphp = 0.
            hchc = 0.
            hphc = 0.
            for m, zeta in zetas.items():
                phase_coeff = self.phase_fac(m)
                # <h+, d> = (exp[i m phi] * zeta).real()
                # <hx, d> = -(exp[i m phi] * zeta).imag()
                z = phase_coeff * zeta
                hpd += z.real
                hcd -= z.imag
                # now calculate the contribution to <h, h>
                cosm = phase_coeff.real
                sinm = phase_coeff.imag
                for mprime in zetas:
                    pcprime = self.phase_fac(mprime)
                    cosmprime = pcprime.real
                    sinmprime = pcprime.imag
                    # needed components
                    rr = rr_m[m, mprime]
                    ss = ss_m[m, mprime]
                    rs = rs_m[m, mprime]
                    sr = sr_m[m, mprime]
                    # <hp, hp>
                    hphp += rr * cosm * cosmprime \
                        + ss * sinm * sinmprime \
                        - rs * cosm * sinmprime \
                        - sr * sinm * cosmprime
                    # <hc, hc>
                    hchc += rr * sinm * sinmprime \
                        + ss * cosm * cosmprime \
                        + rs * sinm * cosmprime \
                        + sr * cosm * sinmprime
                    # <hp, hc>
                    hphc += -rr * cosm * sinmprime \
                        + ss * sinm * cosmprime \
                        + sr * sinm * sinmprime \
                        - rs * cosm * cosmprime
            # Now apply the polarizations and calculate the loglr
            # We have h = Fp * hp + Fc * hc
            # loglr = <h, d> - <h, h>/2
            #       = Fp*<hp, d> + Fc*<hc, d>
            #         - (1/2)*(Fp*Fp*<hp, hp> + Fc*Fc*<hc, hc>
            #                  + 2*Fp*Fc<hp, hc>)
            # (in the last line we have made use of the time series being
            # real, so that <a, b> = <b, a>).
            hd = fp * hpd + fc * hcd
            hh = fp * fp * hphp + fc * fc * hchc + 2 * fp * fc * hphc
            hds[det] = hd
            hhs[det] = hh
            lr += hd - 0.5 * hh
        if return_unmarginalized:
            return self.pol, self.phase, lr, hds, hhs
        # logsumexp over the grid minus log(N) = uniform-prior average
        lr_total = special.logsumexp(lr) - numpy.log(self.nsamples)
        # store the maxl values
        idx = lr.argmax()
        setattr(self._current_stats, 'maxl_polarization', self.pol[idx])
        setattr(self._current_stats, 'maxl_phase', self.phase[idx])
        return float(lr_total)
|
tdent/pycbc
|
pycbc/inference/models/marginalized_gaussian_noise.py
|
Python
|
gpl-3.0
| 26,761
|
[
"Gaussian"
] |
f2583720d339aaca777fe599b4e8e477758c750c11accd8e6303bf7bd4f36843
|
Classes and Object-oriented programming: examples from Python, Java and Javascript.
Among the most important programming paradigms is the object-oriented programming. Object oriented is
based on the idea of using 'objects' to build different applications. We can think of an object as a
container that contains attributes and methods that make a certain type of data useful. Using object-
oriented programming can make it much easier for application developers and programmers to build, extend,
update and maintain programs and applications.
<p>Classes as the blueprint to build objects</p>
The core concept in object-oriented programming is classes. classes are the blueprint or set of instructions
to build an object. Each object is built from a class. A single class is built to perform only one thing
and therefore a program or application contains several classes that accomplish each a specific task.
<p>Classes in python</p>
Compared with other programming languages, Python’s class mechanism adds classes with a minimum of new
syntax and semantics.
To define a class in Python, the syntax is the following
class classname:
<statement 1>
.
.
.
<statement 2>
The code above defines a class called classname without including any statement in it.
The statements included in a class are mainly of two types, variables and methods. A variable is a place
where we store a piece of data. To add a variable inside our class we can use the usual syntax adopted in
Python to define a variable.
class classname:
x = 20
In our class above we define a variable x which assumes the value 20. It is possible to access the
variable x by typing classname.x and the compiler would return the value 20
We can also add a function inside a class, in which case the function takes the name of <i>method</i>. The
syntax for adding a function inside a class in Python is also similar to the usual syntax for functions
in Python however with an additional keyword, 'self'. First let's make an example:
class classname:
x = 20
def Hello(self):
return 'Hello World'
To access the function Hello, we can type classname.Hello and the result will be 'Hello World'.
You can notice that the function Hello takes as argument the keyword self. To understand the importance of
the self keyword, we have to understand the concept of class <i>instance</i> and <i>instantiation</i>.
An instance of a class is a specific object built from a specific class. An instance is assigned to a
reference variable that is used to access all of the instance's properties and methods. When we create a
new instance the process is called instantiation. The keyword self is simply used when defining a method
in order to refer to the specific instance being constructed from one class.
For example (considering the above class):
First = classname()
creates a new instance of the class and assigns this object to the local variable First.
Until now, we have talked about constructing standard instances of our class without any difference in the
content of each instance(all instances will have the variable x and the function Hello). However,
in real applications we are interested in creating instances of a class each having customized attributes.
For example, we can think of a class as a person who can be female or male, can have an age, height and so
on. Here comes the role of <i>class constructor</i>.
A class constructor is a special function (method) added to a class in order to allow for customizing our
class instances based on a set of parameters. In Python, the class constructor takes the name of __init__.
For example,
class classname(parameter1,parameter2,...)
def __init__(self,parameter1,parameter2):
self.var1 = self.parameter1
self.var2 = self.parameter2
self.data = []
x = 10
In the example above, the function __init__ will take whatever we pass as parameters in classname and assign
them respectively to var1 and var2. We will also have an empty list called data and a variable x that will be shared
automatically by all instances. The correct design of a class should include all the variables inside the
__init__ function. It should be noticed that, contrary to some other languages like Java, Python does not
allow for multiple constructors with different signatures (parameters) within a class. Using multiple
constructors could be useful whenever we want to create instances for which not all parameters are
available. To overcome this problem, we can use a default value for the parameters such that if they are
available for the instance being created we replace the default values with the parameter values, otherwise
the parameters assume the default value. For example, we can define a class as follows:
class length():
def __init__(self, initialLength = 0):
self.initialLength = initialLength
or
class length():
def __init__(self, initialLength = None):
self.initialLength = initialLength
In the above examples, if we call the class by not giving any value to the parameter initialLength, the
created instance will have a value for initialLength equal to 0 (first example) or None (second example).
To access the assigned values we can always use the notation instancename.variablename, for example:
First = classname(10,20)
First.var1
10
It is possible to define methods inside a class that act on variables created by the class constructor,
class classname(parameter1,parameter2)
def __init__(self,parameter1,parameter2):
self.var1 = self.parameter1
self.var2 = self.parameter2
self.tricks = []
x = 10
def add_trick(self, trick):
self.tricks.append(trick)
X = classname(10,20)
X.add_trick(20) #this will add the value 20 to the list names trickes defined for the instance X.
Other examples,
class classname:
def __init__(self):
self.items = [] # this defines a list called items
def isEmpty(self):
return self.items == [] # returns true if items is an empty list
def push(self, item):
self.items.append(item) # appends item to items
def pop(self):
return self.items.pop() # removes and returns last object from the list.
def peek(self):
return self.items[len(self.items)-1] # returns the last item without removing it
def size(self):
return len(self.items) # returns the length of items
Methods may call other methods by using method attributes of the self argument:
class classname :
def __init__(self,x):
self.x = x
self.items = []
def add_number(self,x):
self.items.append(x)
def add_twice(self,y):
self.add_number(y)
Inheritance
An important feature of classes is inheritance. Inheritance is used when we want to create a subclass of a class that has the
same features of the parent class with some additional attributes.For example, we can create the following
class for persons:
class Person(name,sport):
def __init__(self, name, Favoritesport):
self.name = name
self.Favoritesport = Favoritesport
def getName(self):
return self.name
def getSport(self):
return self.Favoritesport
def __str__(self):
return "%s is a %s" % (self.name, self.Favoritesport)
Imagine now that we want to see whether a person prefers swimming as a sport and, in addition, whether they like
to swim in the sea. To do this we create a new
class that takes the same attributes as the Person class plus the additional attribute related to swimming.
class SeaSwimmers(Person):#the argument is a class
def __init__(self, name,swim_at_sea):
Person.__init__(self, name, "Swimming") # this inherits all attributes defined in the parent class
self.swim_at_sea = swim_at_sea # this is a new attribute that s
def SeaSwim(self):
return self.swim_at_sea
To create instance of the class Person and the subclass SeaSwimmers:
x = Person("Tom","Swimming")
y = SeaSwimmers("Tom",True)
Iterators in classes
In some cases you might notice that a class contains methods called __iter__ and next(). The reason for these
functions is to add an iterator behavior to the class. For example, the following class includes an iterator
for looping over a sequence (provided as a parameter to the class) backwards.
class Reverse:
def __init__(self, data):
self.data = data
self.index = len(data)
def __iter__(self):
return self
def next(self):
if self.index == 0:
raise StopIteration
self.index = self.index - 1
return self.data[self.index]
x = Reverse([1,2,3])
x.next()
3
x.next()
2
x.next()
1
To read more about iterators in Python check <a href="https://docs.python.org/2/tutorial/classes.html">here</a>
<br>
<br>
Classes in Java
Among the most important features of the Java programming language is the tendency to declare everything
in terms of classes or, in other terms, it drives the use of Object-Oriented Programming.
Generally, to define a class in Java you need the following instructions
1- Declare the class name preceded with the keyword public
2- Declare the variables that will be used in the class constructor and their type. In Java they are
usually called fields. Fields can have initial values or not (for example we can have a field like
"public static int length = 10" or simply "public static int length"). In some cases programmers prefer
to use private instead of public for fields.
3- Create the class constructor which must take the same name as the name of the class. Notice that, unlike
Python, with Java you can create multiple constructors for a class.
4- Create all the necessary methods. Pay attention to the use of the keyword void when creating a method,
where if the methods returns something it should not be used.
5- Instances are created inside the "main" method in Java. In Java , the instantiation of a class is
typically done using the "new" keyword which does not exist in Python.
To read more about the basics of Java you can visit my <a href="https://github.com/TamerKhraisha/BasicsofJava.github.io">Github page</a>
and you can also check the <a href="https://docs.oracle.com/javase/tutorial/java/nutsandbolts/index.html"> official Java tutorial<a>
Next we provide an example of a class called Car.
public class Car { // declare a public class called Car
int modelYear; // declare the parameter and it's type to be used in the constructor
public Car(int year) { //the constructor
modelYear = year; // set the field modelYeay to year
}
public void startEngine() { // this is a method
System.out.println("Vroom!"); // this prints "Vroom" if the method is called
}
public void drive(int distanceInMiles) { // Another method
System.out.println("Miles driven: " + distanceInMiles);
}
public static void main(String[] args){ // everything inside this function is exectuted
Car myFastCar = new Car(2007); // class instantiation
myFastCar.startEngine(); // This prints "Vroom"
myFastCar.drive(1628); // this prints "Miles driven: 1628".
}
}
<br>
Inheritance in Java
Similar to Python, it is possible for one class to share or inherit behavior from another class. This is
done by using the keyword "extends" in Java. For example, the class for Car defined above could be
considered a subclass of Vehicle, since cars are a type of vehicle.
public class Vehicle {
public void checkBatteryStatus() {
System.out.println("The battery is fully charged and ready to go!");
}
}
public class Car extends Vehicle { // Car extends (inherits) from Veichle
int modelYear; // define everything as we did before for car
public Car(int year) {
modelYear = year;
}
//Other methods omitted for brevity...
//Now we will have automatically the method checkBatteryStatus() inherited from Veichle.
public static void main(String[] args){
Car myFastCar = new Car(2007)
myFastCar.checkBatteryStatus();
}
}
Classes in Javascript
JavaScript is among the most flexible object-oriented languages when it comes to syntax. Interestingly,
when defining a class in Javascript we are using the standard Javascript functions to simulate classes,
since Javascript does not have a specific syntax for classes like Python or Java. It is also possible
to create and instantiate classes in Javascript using different ways. In this post, I am going to concentrate
on the the use of functions and prototype to create and manipulates classes.
To begin, let's start with the following example:
function Person(name,age) {
this.name = name;
this.age = age;
}
with the above code, we create a class called Person that takes as arguments name and age. To create an
instance of Person, we use the new prefix in Javascript,
var bob = new Person("Bob Smith", 30); // This creates a class of Person called bob
bob.name // this returns the name of bob
bob.age // this returns the age of bob
How to add a method to our class Person ? Imagine we want to add a method to Person that tells the person
to say "Hello". Here's one way to do it
var bob = new Person("Bob Smith", 30);
bob.SayHello = function() {
console.log("Woof");
};
bob.SayHello(); // this is to test that the method works
Or if we realize that we forgot to add the nationality to bob, we can type the following:
bob.nationality = "English";
So far so easy. However, in most cases we want to add a new method or attribute not to an instance (like
bob) of our class but to the class itself such that all the newly created instances will share the same
method or attribute. Here comes the role of 'prototype' in Javascript. It works as follows:
Person.prototype.nationality = "English";
Person.prototype.SayHello = function() {
console.log("Woof");
};
see more <a href="https://www.w3schools.com/js/js_object_prototypes.asp">here</a>
<br>
<br>
Inheritance in Javascript
For a class to inherit from another class in Javascript we can use prototype. For example, suppose we
want to create an animal class that takes two parameters, name and number of legs. Additionally, we
define a method for Animal class that tells returns the name of the animal.
function Animal(name, numLegs) {
this.name = name;
this.numLegs = numLegs;
this.Beautiful = "Yes"
}
Animal.prototype.sayName = function() {
console.log("Hi my name is " + this.name);
};
Now suppose we want to create another class that is specific for penguins and takes also as arguments
name and the number of legs and we want it to have the same method for printing the name as in Animal.
Here instead of repeating all the code for Animal again, we can first set the number of legs to 2, and we are
left with the parameter name. In addition, if we type 'NewClass.prototype = new ParentClass', then the
compiler will automatically import all the attributes and methods defined in the parent class and include
them in the new class that inherits from it. Notice that whatever we pass as parameter in the subclass
will still depend on what we pass as value to that class, for example in the Penguin class below we still
have 'name' as an argument and therefore any instance of Penguin will have the name attribute equal to
the value the we pass inside Penguin. On the other hand, the attribute 'Beautiful' that was defined in
the Animal constructor will be automatically inherited by Penguin because we didn't include it as a
parameter in the Penguin class.
// define a Penguin class
function Penguin(name){
this.name = name
this.numLegs = 2
}
Penguin.prototype = new Animal()
Penguin.sayName() \\ this will print the name
Penguin.Beautiful \\ this will print yes.
To learn more about Javascript , visit my Github webpage where I have a tutorial to learn the basics of
Javascript.
|
TamerKhraisha/webpage.github.io
|
python-java-javascript.py
|
Python
|
mit
| 15,987
|
[
"VisIt"
] |
fc37a00b12e4064f0ef58df1bcd629902271da4b0ac4b03c7b5cd0f14595e7b7
|
# Copyright (C) 2015 Henrique Pereira Coutada Miranda, Alejandro Molina Sanchez, Alexandre Morlet, Fulvio Paleari
# All rights reserved.
#
# This file is part of yambopy
#
#
from yambopy import *
import os
#
# by Henrique Miranda
#
def pack_files_in_folder(folder,save_folder=None,mask='',verbose=True):
"""
Pack the output files in a folder to json files
"""
if not save_folder: save_folder = folder
#pack the files in .json files
for dirpath,dirnames,filenames in os.walk(folder):
#check if the folder fits the mask
if mask in dirpath:
#check if there are some output files in the folder
if ([ f for f in filenames if 'o-' in f ]):
print dirpath
y = YamboOut(dirpath,save_folder=save_folder)
y.pack()
#
# by Alejandro Molina-Sanchez
#
def breaking_symmetries(efield1,efield2=[0,0,0],folder='.',RmTimeRev=True):
    """
    Breaks the symmetries for a given field.
    Second field used in circular polarized pump configuration
    RmTimeRev : Remove time symmetry is set True by default

    NOTE(review): efield2 is a mutable default argument; safe only as long as
    callers never mutate the list they receive/pass.
    """
    # prepare a fresh run folder with a copy of the ground-state database
    os.system('mkdir -p %s'%folder)
    os.system('cp -r database/SAVE %s'%folder)
    # initialize the yambo databases in the new folder
    os.system('cd %s; yambo'%folder)
    # build the ypp input file that breaks the symmetries
    ypp = YamboIn('ypp_ph -y -V all',folder=folder,filename='ypp.in')
    ypp['Efield1'] = efield1 # Field in the X-direction
    ypp['Efield2'] = efield2 # Field in the X-direction
    if RmTimeRev:
        ypp.arguments.append('RmTimeRev') # Remove Time Symmetry
    ypp.write('%s/ypp.in'%folder)
    # run ypp, then re-run yambo in the symmetry-reduced FixSymm folder
    os.system('cd %s ; ypp_ph -F ypp.in'%folder )
    os.system('cd %s ; cd FixSymm; yambo '%folder )
    # replace the original SAVE with the symmetry-broken one and clean up
    os.system('rm -r %s/SAVE'%folder)
    os.system('mv %s/FixSymm/SAVE %s/'%(folder,folder))
    os.system('rm -r %s/FixSymm'%folder)
#
# by Alexandre Morlet & Henrique Miranda
#
def analyse_gw(folder,var,bandc,kpointc,bandv,kpointv,pack,text,draw):
"""
Study the convergence of GW calculations by looking at the change in band-gap value.
The script reads from <folder> all results from <variable> calculations and display them.
Use the band and k-point options (or change default values) according to the size of your k-grid and
the location of the band extrema.
"""
print 'Valence band: ',bandv,'conduction band: ',bandc
print 'K-point VB: ',kpointv, ' k-point CB: ',kpointc
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if pack:
print 'Packing ...'
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
# importing data from .json files in <folder>
print 'Importing data...'
data = YamboAnalyser(folder)
# extract data according to relevant variable
outvars = data.get_data(tags=(var,'reference'))
invars = data.get_inputfiles_tag(var)
tags = data.get_tags(tags=(var,'reference'))
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(invars.items(), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
if len(keys) == 0: raise ValueError('No files with this variable were found')
print 'Files detected:'
for key in keys:
print key
print 'Computing values...'
### Output
# Unit of the variable :
unit = invars[keys[0]]['variables'][var][1]
# The following variables are used to make the script compatible with both short and extended output
kpindex = tags[keys[0]].tolist().index('K-point')
bdindex = tags[keys[0]].tolist().index('Band')
e0index = tags[keys[0]].tolist().index('Eo')
gwindex = tags[keys[0]].tolist().index('E-Eo')
array = np.zeros((len(keys),2))
for i,key in enumerate(keys):
# input value
# GbndRnge and BndsRnX_ are special cases
if var.startswith('GbndRng') or var.startswith('BndsRnX'):
# format : [1, nband, ...]
array[i][0] = invars[key]['variables'][var][0][1]
else:
array[i][0] = invars[key]['variables'][var][0]
# Output value (gap energy)
# First the relevant lines are identified
valence=[]
conduction=[]
for j in range(len(outvars[key]+1)):
if outvars[key][j][kpindex]==kpointc and outvars[key][j][bdindex]==bandc:
conduction=outvars[key][j]
elif outvars[key][j][kpindex]==kpointv and outvars[key][j][bdindex]==bandv:
valence = outvars[key][j]
# Then the gap can be calculated
array[i][1] = conduction[e0index]+conduction[gwindex]-(valence[e0index]+valence[gwindex])
if text:
os.system('mkdir -p analyse_%s'%folder)
outname = './analyse_%s/%s_%s.dat'%(folder,folder,var)
header = var+' ('+str(unit)+'), gap'
np.savetxt(outname,array,delimiter='\t',header=header)
print 'Data saved to ',outname
if draw:
plt.plot(array[:,0],array[:,1],'o-')
plt.xlabel(var+' ('+unit+')')
plt.ylabel('E_gw = E_lda + \Delta E')
plt.savefig('%s.png'%var)
if 'DISPLAY' in os.environ:
plt.show()
print 'Done.'
#
# by Alexandre Morlet
#
def analyse_bse(folder,var,numbexc,intexc,degenexc,maxexc,pack,text,draw):
    """
    Using ypp, you can study the convergence of BSE calculations in 2 ways:
    Create a .png of all absorption spectra relevant to the variable you study
    Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
    The script reads from <folder> all results from <variable> calculations for processing.
    The resulting pictures and data files are saved in the ./analyse_<folder>/ folder.
    Arguments:
    folder -> Folder containing SAVE and convergence runs.
    var -> Variable tested (e.g. FFTGvecs)
    numbexc -> Number of excitons to read beyond threshold (default=2)
    intexc -> Minimum intensity for excitons to be considered bright (default=0.05)
    degenexc -> Energy threshold under which different peaks are merged (eV) (default=0.01)
    maxexc -> Energy threshold after which excitons are not read anymore (eV) (default=8.0)
    pack -> Skips packing o- files into .json files (default: True)
    text -> Skips writing the .dat file (default: True)
    draw -> Skips drawing (plotting) the abs spectra (default: True)
    Returns:
    excitons -> energies of the first few excitons as function of some variable
    spectras -> absorption spectra for each variable
    """
    # Packing results (o-* files) from the calculations into yambopy-friendly .json files
    if pack: # True by default, False if -np used
        print 'Packing ...'
        pack_files_in_folder(folder,mask=var)
        pack_files_in_folder(folder,mask='reference')
    # importing data from .json files in <folder>
    print 'Importing data...'
    data = YamboAnalyser(folder)
    # extract data according to relevant var
    invars = data.get_inputfiles_tag(var)
    # Get only files related to the convergence study of the variable,
    # ordered to have a smooth plot
    keys=[]
    sorted_invars = sorted(invars.items(), key=operator.itemgetter(1))
    for i in range(0,len(sorted_invars)):
        key=sorted_invars[i][0]
        if key.startswith(var) or key=='reference.json':
            keys.append(key)
    if len(keys) == 0: raise ValueError('No files with this variable were found')
    print 'Files detected:'
    for key in keys:
        print key
    # unit of the input value
    unit = invars[keys[0]]['variables'][var][1]
    ######################
    # Output-file filename
    ######################
    os.system('mkdir -p analyse_%s'%folder)
    outname = './analyse_%s/%s_%s'%(folder,folder,var)
    # Array that will contain the output
    excitons = []
    spectras = []
    # Loop over all calculations
    for key in keys:
        jobname=key.replace('.json','')
        print jobname
        # input value
        v = invars[key]['variables'][var][0]
        # ranges like [1, nband, ...] are reduced to the upper bound
        if type(v) == list:
            inp = v[1]
        else:
            inp = v
        print 'Preparing JSON file. Calling ypp if necessary.'
        ### Creating the 'absorptionspectra.json' file
        # It will contain the exciton energies
        # NOTE(review): 'y' appears unused below; possibly kept for a side
        # effect of YamboOut's constructor — confirm before removing.
        y = YamboOut(folder=folder,save_folder=folder)
        # Args : name of job, SAVE folder path, folder where job was run path
        a = YamboBSEAbsorptionSpectra(jobname,path=folder)
        # Get excitons values (runs ypp once)
        a.get_excitons(min_intensity=intexc,max_energy=maxexc,Degen_Step=degenexc)
        # Write .json file with spectra and eigenenergies
        a.write_json(filename=outname)
        ### Loading data from .json file
        f = open(outname+'.json')
        data = json.load(f)
        f.close()
        ### Plotting the absorption spectra
        spectras.append({'x': data['E/ev[1]'],
                         'y': data['EPS-Im[2]'],
                         'label': jobname})
        ### BSE spectra
        ### Axes : lines for exciton energies (disabled, would make a mess)
        #for n,exciton in enumerate(data['excitons']):
        #    plt.axvline(exciton['energy'])
        ### Creating array with exciton values (according to settings)
        l = [inp]
        for n,exciton in enumerate(data['excitons']):
            # keep only the first numbexc exciton energies
            if n <= numbexc-1:
                l.append(exciton['energy'])
        excitons.append(l)
    if text:
        header = 'Columns : '+var+' (in '+unit+') and "bright" excitons eigenenergies in order.'
        ## Excitons energies
        #output on the screen
        print header
        for exc in excitons:
            x = exc[0]
            e = exc[1:]
            print "%8.4lf "%x+("%8.4lf"*len(e))%tuple(e)
        #save file
        filename = outname+'_excitons.dat'
        np.savetxt(filename,excitons,header=header)
        print filename
        ## Spectra
        filename = outname+'_spectra.dat'
        f = open(filename,'w')
        for spectra in spectras:
            label = spectra['label']
            f.write('#%s\n'%label)
            for x,y in zip(spectra['x'],spectra['y']):
                f.write("%12.8e %12.8e\n"%(x,y))
            f.write('\n\n')
        f.close()
        print filename
    else:
        print '-nt flag : no text produced.'
    if draw:
        ## Exciton energy plots
        filename = outname+'_excitons.png'
        excitons = np.array(excitons)
        labels = [spectra['label'] for spectra in spectras]
        fig = plt.figure(figsize=(6,5))
        matplotlib.rcParams.update({'font.size': 15})
        plt.ylabel('1st exciton energy (eV)')
        plt.xticks(excitons[:,0],labels)
        plt.plot(excitons[:,0],excitons[:,1])
        plt.tight_layout()
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        if 'DISPLAY' in os.environ:
            plt.show()
        print filename
        ## Spectra plots
        filename = outname+'_spectra.png'
        fig = plt.figure(figsize=(6,5))
        matplotlib.rcParams.update({'font.size': 15})
        for spectra in spectras:
            plt.plot(spectra['x'],spectra['y'],label=spectra['label'])
        plt.xlabel('$\omega$ (eV)')
        plt.ylabel('Im($\epsilon_M$)')
        plt.legend(frameon=False)
        plt.tight_layout()
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        if 'DISPLAY' in os.environ:
            plt.show()
        print filename
    else:
        print '-nd flag : no plot produced.'
    print 'Done.'
    return excitons, spectras
#
# by Fulvio Paleari & Henrique Miranda
#
def merge_qp(output,files,verbose=False):
    """
    Merge several quasiparticle (QP) netCDF databases into a single file.

    output  -> filename of the merged netCDF database to create
    files   -> list of open file objects pointing to the databases to merge
    verbose -> if True, print every description string of each input file
    """
    #read all the files and display main info in each of them
    print "=========input========="
    filenames = [ f.name for f in files]
    datasets = [ Dataset(filename) for filename in filenames]
    QP_table, QP_kpts, QP_E_E0_Z = [], [], []
    for d,filename in zip(datasets,filenames):
        # PARS holds integer metadata; only some fields are needed here
        _, nkpoints, nqps, _, nstrings = map(int,d['PARS'][:])
        print "filename: ", filename
        if verbose:
            print "description:"
            for i in xrange(1,nstrings+1):
                print ''.join(d['DESC_strings_%05d'%i][0])
        else:
            print "description:", ''.join(d['DESC_strings_%05d'%(nstrings)][0])
        print
        # collect the per-file tables (transposed to rows of QP entries)
        QP_table.append( d['QP_table'][:].T )
        QP_kpts.append( d['QP_kpts'][:].T )
        QP_E_E0_Z.append( d['QP_E_Eo_Z'][:] )
    # create the QP_table
    QP_table_save = np.vstack(QP_table)
    # create the kpoints table
    #create a list with the bigger size of QP_table
    nkpoints = int(max(QP_table_save[:,2]))
    QP_kpts_save = np.zeros([nkpoints,3])
    #iterate over the QP's and store the corresponding kpoint
    for qp_file,kpts in zip(QP_table,QP_kpts):
        #iterate over the kpoints and save the coordinates on the list
        for qp in qp_file:
            # columns are (band1, band2, kpoint-index); indices are 1-based
            n1,n2,nk = map(int,qp)
            QP_kpts_save[nk-1] = kpts[nk-1]
    # create the QPs energies table
    QP_E_E0_Z_save = np.concatenate(QP_E_E0_Z,axis=1)
    #create reference file from one of the files
    netcdf_format = datasets[0].data_model
    fin = datasets[0]
    fout = Dataset(output,'w',format=netcdf_format)
    variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z']
    variables_save = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save]
    variables_dict = dict(zip(variables_update,variables_save))
    PARS_save = fin['PARS'][:]
    # update nkpoints and nqps entries to reflect the merged tables
    PARS_save[1:3] = nkpoints,len(QP_table_save)
    #create the description string
    kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2])
    bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1])
    description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax)
    description_save = np.array([i for i in " %s"%description])
    #output data
    print "========output========="
    print "filename: ", output
    print "description: ", description
    #copy dimensions
    for dname, the_dim in fin.dimensions.iteritems():
        fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None)
    #get dimensions
    def dimensions(array):
        # yambo encodes dimensions by size, e.g. 'D_0000000003'
        return tuple([ 'D_%010d'%d for d in array.shape ])
    #create missing dimensions
    for v in variables_save:
        for dname,d in zip( dimensions(v),v.shape ):
            if dname not in fout.dimensions.keys():
                fout.createDimension(dname, d)
    #copy variables
    for v_name, varin in fin.variables.iteritems():
        if v_name in variables_update:
            #get the variable
            merged = variables_dict[v_name]
            # create the variable
            outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged))
            # Copy variable attributes
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            #save outvar
            outVar[:] = merged
        else:
            # create the variable
            outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions)
            # Copy variable attributes
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            if v_name=='PARS':
                outVar[:] = PARS_save[:]
            elif v_name=='DESC_strings_%05d'%(nstrings):
                # overwrite the last description string with the merged one
                outVar[:] = varin[:]
                outVar[:,:len(description_save)] = description_save.T
            else:
                outVar[:] = varin[:]
    fout.close()
#
# by Henrique Miranda
#
def plot_excitons(filename,cut=0.2,size=20):
    """
    Plot the absorption spectra (BSE vs IP) and a grid of exciton weight maps
    read from a yambopy .json file.

    filename -> .json file produced by YamboBSEAbsorptionSpectra.write_json
    cut      -> half-width of the k-space window shown for each exciton
    size     -> marker size for the exciton scatter plots
    """
    from math import ceil, sqrt
    def get_var(dictionary,variables):
        """
        To have compatibility with different version of yambo
        We provide a list of different possible tags
        """
        for var in variables:
            if var in dictionary:
                return dictionary[var]
        raise ValueError( 'Could not find the variables %s in the output file'%str(variables) )
    #
    # read file
    #
    f = open(filename)
    data = json.load(f)
    f.close()
    #
    # plot the absorption spectra
    #
    nexcitons = len(data['excitons'])
    print "nexitons", nexcitons
    # BSE (interacting) and IP (independent-particle) spectra
    plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPS-Im[2]' ]),label='BSE',lw=2)
    plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPSo-Im[4]']),label='IP',lw=2)
    for n,exciton in enumerate(data['excitons']):
        plt.axvline(exciton['energy'])
    plt.xlabel('$\\omega$ (eV)')
    plt.ylabel('Intensity arb. units')
    plt.legend(frameon=False)
    plt.draw()
    #
    # plot excitons
    #
    #dimensions
    nx = int(ceil(sqrt(nexcitons)))
    ny = int(ceil(nexcitons*1.0/nx))
    print "cols:",nx
    print "rows:",ny
    cmap = plt.get_cmap("gist_heat_r")
    fig = plt.figure(figsize=(nx*3,ny*3))
    # one subplot per exciton, ordered by increasing energy
    sorted_excitons = sorted(data['excitons'],key=lambda x: x['energy'])
    for n,exciton in enumerate(sorted_excitons):
        #get data
        w = np.array(exciton['weights'])
        qpt = np.array(exciton['qpts'])
        #plot
        ax = plt.subplot(ny,nx,n+1)
        ax.scatter(qpt[:,0], qpt[:,1], s=size, c=w, marker='H', cmap=cmap, lw=0, label="%5.2lf (eV)"%exciton['energy'])
        ax.text(-cut*.9,-cut*.9,"%5.2lf (eV)"%exciton['energy'])
        # axis
        plt.xlim([-cut,cut])
        plt.ylim([-cut,cut])
        ax.yaxis.set_major_locator(plt.NullLocator())
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.set_aspect('equal')
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.01, hspace=0.01)
    #remove extension from file
    figure_filename = os.path.splitext(filename)[0]
    plt.savefig('%s.png'%figure_filename)
    if 'DISPLAY' in os.environ:
        plt.show()
|
henriquemiranda/yambopy
|
yambopy/recipes.py
|
Python
|
bsd-3-clause
| 17,973
|
[
"Yambo"
] |
935da6add191e268181e86b92d4a6dd2bc3e5a4f94cb73ad069406700b7a3731
|
""" Just listing the possible Properties

This module contains the list of Properties that can be assigned to users and groups.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

__RCSID__ = "$Id$"

#: A host property. This property is used:
#:   * For a host to forward credentials in a DISET call
TRUSTED_HOST = "TrustedHost"
#: Normal user operations
NORMAL_USER = "NormalUser"
#: CS Administrator - possibility to edit the Configuration Service
CS_ADMINISTRATOR = "CSAdministrator"
#: Job sharing among members of a group
JOB_SHARING = "JobSharing"
#: DIRAC Service Administrator
SERVICE_ADMINISTRATOR = "ServiceAdministrator"
#: Job Administrator can manipulate everybody's jobs
JOB_ADMINISTRATOR = "JobAdministrator"
#: Job Monitor - can get job monitoring information
JOB_MONITOR = "JobMonitor"
#: Accounting Monitor - can see accounting data for all groups
ACCOUNTING_MONITOR = "AccountingMonitor"
#: Private pilot
PILOT = "Pilot"
#: Generic pilot
GENERIC_PILOT = "GenericPilot"
#: Site Manager
SITE_MANAGER = "SiteManager"
#: User, group, VO Registry management
USER_MANAGER = "UserManager"
#: Operator
OPERATOR = "Operator"
#: Allow getting full delegated proxies
FULL_DELEGATION = "FullDelegation"
#: Allow getting only limited proxies (ie. pilots)
LIMITED_DELEGATION = "LimitedDelegation"
#: Allow getting only limited proxies for one self
PRIVATE_LIMITED_DELEGATION = "PrivateLimitedDelegation"
#: Allow managing proxies
PROXY_MANAGEMENT = "ProxyManagement"
#: Allow managing production
PRODUCTION_MANAGEMENT = "ProductionManagement"
#: Allow production request approval on behalf of PPG
PPG_AUTHORITY = "PPGAuthority"
#: Allow Bookkeeping Management
BOOKKEEPING_MANAGEMENT = "BookkeepingManagement"
#: Allow to set notifications and manage alarms
ALARMS_MANAGEMENT = "AlarmsManagement"
#: Allow FC Management - FC root user
FC_MANAGEMENT = "FileCatalogManagement"
#: Allow staging files
STAGE_ALLOWED = "StageAllowed"
#: Allow VMDIRAC Operations via various handlers
VM_RPC_OPERATION = "VmRpcOperation"
|
ic-hep/DIRAC
|
src/DIRAC/Core/Security/Properties.py
|
Python
|
gpl-3.0
| 2,097
|
[
"DIRAC"
] |
4fe66bd3369927f682b5216f8b4ef599dc3960babbb3fa6f301739946217aeb0
|
import sys
# Refuse to run on interpreters older than 2.6 (0x2060000 == 2.6.0 hexversion).
if sys.hexversion < 0x2060000:
    raise NotImplementedError('Python < 2.6 not supported.')
# Bootstrap setuptools when it is not installed (ez_setup ships alongside).
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
# The long description shown on PyPI is the README taken verbatim.
# NOTE(review): 'file' shadows the Python 2 builtin of the same name.
with open('README.rst') as file:
    long_description = file.read()
# NOTE(review): 'manimulation' below looks like a typo for 'manipulation';
# it is a published metadata string, so fix it deliberately, not silently.
setup(name='pydons',
      version='0.2.5',
      description='Python data manimulation add-ons',
      long_description=long_description,
      author='Jakub Urban',
      author_email='coobas at gmail dt com',
      url='https://bitbucket.org/urbanj/pydons',
      packages=['pydons'],
      install_requires=['numpy', 'h5py>=2.1', 'hdf5storage', 'six'],
      extras_require={'netCDF4': ['netCDF4']},
      # requires=requires,
      license='MIT',
      keywords='hdf5 netCDF matlab',
      classifiers=[
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3",
          "Development Status :: 3 - Alpha",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Intended Audience :: Developers",
          "Intended Audience :: Information Technology",
          "Intended Audience :: Science/Research",
          "Topic :: Scientific/Engineering",
          "Topic :: Database",
          "Topic :: Software Development :: Libraries :: Python Modules"
      ],
      test_suite='nose.collector',
      tests_require='nose>=1.0'
      )
|
coobas/pydons
|
setup.py
|
Python
|
mit
| 1,438
|
[
"NetCDF"
] |
9825d9bae8f256dd91489952edaa2790fc6029070f61877f9cbeccccfd67401b
|
""" NormalizeStaticIf adds support for static guards. """
from pythran.analyses import (ImportedIds, HasReturn, IsAssigned, CFG,
HasBreak, HasContinue, DefUseChains, Ancestors,
StaticExpressions, HasStaticExpression)
from pythran.passmanager import Transformation
from pythran.syntax import PythranSyntaxError
import gast as ast
from copy import deepcopy
LOOP_NONE, EARLY_RET, LOOP_BREAK, LOOP_CONT = range(4)
def outline(name, formal_parameters, out_parameters, stmts,
            has_return, has_break, has_cont):
    """
    Build a FunctionDef named *name* wrapping *stmts*.

    name              -> name of the generated function
    formal_parameters -> identifiers captured as the function's parameters
    out_parameters    -> identifiers assigned in *stmts* and used afterwards;
                         they are returned as a trailing tuple
    stmts             -> either an expression (returned directly) or a
                         statement list (mutated in place: a return is appended)
    has_return/has_break/has_cont -> control-flow constructs present in stmts
    """
    args = ast.arguments(
        [ast.Name(fp, ast.Param(), None, None) for fp in formal_parameters],
        [], None, [], [], None, [])
    if isinstance(stmts, ast.expr):
        assert not out_parameters, "no out parameters with expr"
        fdef = ast.FunctionDef(name, args, [ast.Return(stmts)], [], None, None)
    else:
        fdef = ast.FunctionDef(name, args, stmts, [], None, None)
        # this is part of a huge trick that plays with delayed type inference
        # it basically computes the return type based on out parameters, and
        # the return statement is unconditionally added so if we have other
        # returns, there will be a computation of the output type based on the
        # __combined of the regular return types and this one The original
        # returns have been patched above to have a different type that
        # cunningly combines with this output tuple
        #
        # This is the only trick I found to let pythran compute both the output
        # variable type and the early return type. But hey, a dirty one :-/
        stmts.append(
            ast.Return(
                ast.Tuple(
                    [ast.Name(fp, ast.Load(), None, None)
                     for fp in out_parameters],
                    ast.Load()
                )
            )
        )
        if has_return:
            # tag user returns / the synthetic guard return differently
            pr = PatchReturn(stmts[-1], has_break or has_cont)
            pr.visit(fdef)
        if has_break or has_cont:
            if not has_return:
                # prepend a status flag so the caller can replay break/continue
                stmts[-1].value = ast.Tuple([ast.Constant(LOOP_NONE, None),
                                             stmts[-1].value],
                                            ast.Load())
            pbc = PatchBreakContinue(stmts[-1])
            pbc.visit(fdef)
    return fdef
class PatchReturn(ast.NodeTransformer):
    """Wrap every return value in a ``builtins.pythran.StaticIf*Return`` call.

    The synthetic trailing return (the *guard*) is tagged with
    ``StaticIfNoReturn``; genuine user returns become ``StaticIfReturn``.
    """

    def __init__(self, guard, has_break_or_cont):
        self.guard = guard
        self.has_break_or_cont = has_break_or_cont

    def visit_Return(self, node):
        # Pick the marker depending on whether this is the synthetic return.
        holder = "StaticIfNoReturn" if node is self.guard else "StaticIfReturn"
        returned = node.value
        marker = ast.Attribute(
            ast.Attribute(
                ast.Name("builtins", ast.Load(), None, None),
                "pythran",
                ast.Load()),
            holder,
            ast.Load())
        payload = [returned] if returned else [ast.Constant(None, None)]
        return ast.Return(ast.Call(marker, payload, []))
class PatchBreakContinue(ast.NodeTransformer):
    """Replace break/continue with copies of the guard return carrying a flag."""

    def __init__(self, guard):
        self.guard = guard

    # Do not descend into nested loops: their break/continue belong to them.
    def visit_For(self, _):
        pass

    def visit_While(self, _):
        pass

    def patch_Control(self, node, flag):
        patched = deepcopy(self.guard)
        returned = patched.value
        if isinstance(returned, ast.Call):
            # guard wraps a StaticIf* call: swap in the break/continue marker
            returned.func.attr = ("StaticIfBreak" if flag == LOOP_BREAK
                                  else "StaticIfCont")
        else:
            # guard is a (status, value) tuple: overwrite the status constant
            patched.value.elts[0].value = flag
        return patched

    def visit_Break(self, node):
        return self.patch_Control(node, LOOP_BREAK)

    def visit_Continue(self, node):
        return self.patch_Control(node, LOOP_CONT)
class NormalizeStaticIf(Transformation):
    def __init__(self):
        # Request the analyses this pass depends on; their results become
        # self.static_expressions, self.ancestors and self.def_use_chains.
        super(NormalizeStaticIf, self).__init__(StaticExpressions, Ancestors,
                                                DefUseChains)
    def visit_Module(self, node):
        """Visit the module, then append all outlined helper functions."""
        self.new_functions = []  # functions created while outlining static ifs
        self.funcs = []          # stack of enclosing FunctionDef nodes
        self.cfgs = []           # stack of their control-flow graphs
        self.generic_visit(node)
        node.body.extend(self.new_functions)
        return node
    def escaping_ids(self, scope_stmt, stmts):
        'gather sets of identifiers defined in stmts and used out of it'
        assigned_nodes = self.gather(IsAssigned, self.make_fake(stmts))
        escaping = set()
        for assigned_node in assigned_nodes:
            # def-use chains enumerate every use of this assignment
            head = self.def_use_chains.chains[assigned_node]
            for user in head.users():
                # a use whose ancestors exclude scope_stmt lies outside it
                if scope_stmt not in self.ancestors[user.node]:
                    escaping.add(head.name())
        return escaping
    @staticmethod
    def make_fake(stmts):
        # Wrap stmts in a dummy `if 0:` so they form one node for analyses.
        return ast.If(ast.Constant(0, None), stmts, [])
    @staticmethod
    def make_dispatcher(static_expr, func_true, func_false,
                        imported_ids):
        """Build ``builtins.pythran.static_if(expr, t, f)(ids...)`` call."""
        dispatcher_args = [static_expr,
                           ast.Name(func_true.name, ast.Load(), None, None),
                           ast.Name(func_false.name, ast.Load(), None, None)]
        dispatcher = ast.Call(
            ast.Attribute(
                ast.Attribute(
                    ast.Name("builtins", ast.Load(), None, None),
                    "pythran",
                    ast.Load()),
                "static_if",
                ast.Load()),
            dispatcher_args, [])
        # call the selected outlined function with the captured identifiers
        actual_call = ast.Call(
            dispatcher,
            [ast.Name(ii, ast.Load(), None, None) for ii in imported_ids],
            [])
        return actual_call
    def true_name(self):
        # name of the outlined "true" branch; new_functions grows by two per if
        return "$isstatic{}".format(len(self.new_functions) + 0)
    def false_name(self):
        # name of the outlined "false" branch (always true_name's index + 1)
        return "$isstatic{}".format(len(self.new_functions) + 1)
    def visit_FunctionDef(self, node):
        # maintain CFG/function stacks so nested functions are handled
        self.cfgs.append(self.gather(CFG, node))
        self.funcs.append(node)
        onode = self.generic_visit(node)
        self.funcs.pop()
        self.cfgs.pop()
        return onode
def visit_IfExp(self, node):
self.generic_visit(node)
if node.test not in self.static_expressions:
return node
imported_ids = sorted(self.gather(ImportedIds, node))
func_true = outline(self.true_name(), imported_ids, [],
node.body, False, False, False)
func_false = outline(self.false_name(), imported_ids, [],
node.orelse, False, False, False)
self.new_functions.extend((func_true, func_false))
actual_call = self.make_dispatcher(node.test, func_true,
func_false, imported_ids)
return actual_call
def make_control_flow_handlers(self, cont_n, status_n, expected_return,
has_cont, has_break):
'''
Create the statements in charge of gathering control flow information
for the static_if result, and executes the expected control flow
instruction
'''
if expected_return:
assign = cont_ass = [ast.Assign(
[ast.Tuple(expected_return, ast.Store())],
ast.Name(cont_n, ast.Load(), None, None), None)]
else:
assign = cont_ass = []
if has_cont:
cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None, None),
[ast.Eq()], [ast.Constant(LOOP_CONT, None)])
cont_ass = [ast.If(cmpr,
deepcopy(assign) + [ast.Continue()],
cont_ass)]
if has_break:
cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None, None),
[ast.Eq()], [ast.Constant(LOOP_BREAK, None)])
cont_ass = [ast.If(cmpr,
deepcopy(assign) + [ast.Break()],
cont_ass)]
return cont_ass
def visit_If(self, node):
if node.test not in self.static_expressions:
return self.generic_visit(node)
imported_ids = self.gather(ImportedIds, node)
assigned_ids_left = self.escaping_ids(node, node.body)
assigned_ids_right = self.escaping_ids(node, node.orelse)
assigned_ids_both = assigned_ids_left.union(assigned_ids_right)
imported_ids.update(i for i in assigned_ids_left
if i not in assigned_ids_right)
imported_ids.update(i for i in assigned_ids_right
if i not in assigned_ids_left)
imported_ids = sorted(imported_ids)
assigned_ids = sorted(assigned_ids_both)
fbody = self.make_fake(node.body)
true_has_return = self.gather(HasReturn, fbody)
true_has_break = self.gather(HasBreak, fbody)
true_has_cont = self.gather(HasContinue, fbody)
felse = self.make_fake(node.orelse)
false_has_return = self.gather(HasReturn, felse)
false_has_break = self.gather(HasBreak, felse)
false_has_cont = self.gather(HasContinue, felse)
has_return = true_has_return or false_has_return
has_break = true_has_break or false_has_break
has_cont = true_has_cont or false_has_cont
self.generic_visit(node)
func_true = outline(self.true_name(), imported_ids, assigned_ids,
node.body, has_return, has_break, has_cont)
func_false = outline(self.false_name(), imported_ids, assigned_ids,
node.orelse, has_return, has_break, has_cont)
self.new_functions.extend((func_true, func_false))
actual_call = self.make_dispatcher(node.test,
func_true, func_false, imported_ids)
# variable modified within the static_if
expected_return = [ast.Name(ii, ast.Store(), None, None)
for ii in assigned_ids]
self.update = True
# name for various variables resulting from the static_if
n = len(self.new_functions)
status_n = "$status{}".format(n)
return_n = "$return{}".format(n)
cont_n = "$cont{}".format(n)
if has_return:
cfg = self.cfgs[-1]
always_return = all(isinstance(x, (ast.Return, ast.Yield))
for x in cfg[node])
always_return &= true_has_return and false_has_return
fast_return = [ast.Name(status_n, ast.Store(), None, None),
ast.Name(return_n, ast.Store(), None, None),
ast.Name(cont_n, ast.Store(), None, None)]
if always_return:
return [ast.Assign([ast.Tuple(fast_return, ast.Store())],
actual_call, None),
ast.Return(ast.Name(return_n, ast.Load(), None, None))]
else:
cont_ass = self.make_control_flow_handlers(cont_n, status_n,
expected_return,
has_cont, has_break)
cmpr = ast.Compare(ast.Name(status_n, ast.Load(), None, None),
[ast.Eq()], [ast.Constant(EARLY_RET, None)])
return [ast.Assign([ast.Tuple(fast_return, ast.Store())],
actual_call, None),
ast.If(cmpr,
[ast.Return(ast.Name(return_n, ast.Load(),
None, None))],
cont_ass)]
elif has_break or has_cont:
cont_ass = self.make_control_flow_handlers(cont_n, status_n,
expected_return,
has_cont, has_break)
fast_return = [ast.Name(status_n, ast.Store(), None, None),
ast.Name(cont_n, ast.Store(), None, None)]
return [ast.Assign([ast.Tuple(fast_return, ast.Store())],
actual_call, None)] + cont_ass
elif expected_return:
return ast.Assign([ast.Tuple(expected_return, ast.Store())],
actual_call, None)
else:
return ast.Expr(actual_call)
class SplitStaticExpression(Transformation):
    """Rewrite conditions mixing static and non-static sub-expressions
    into nested tests so that each static expression ends up alone in a
    test condition (a precondition for NormalizeStaticIf)."""

    def __init__(self):
        super(SplitStaticExpression, self).__init__(StaticExpressions)

    def visit_Cond(self, node):
        '''
        generic expression splitting algorithm. Should work for ifexp and if

        using W(rap) and U(n)W(rap) to manage difference between expr and stmt

        The idea is to split a BinOp in three expressions:
            1. a (possibly empty) non-static expr
            2. an expr containing a static expr
            3. a (possibly empty) non-static expr

        Once split, the if body is refactored to keep the semantic,
        and then recursively split again, until all static expr are alone in a
        test condition
        '''
        NodeTy = type(node)
        # ast.If bodies are statement lists while ast.IfExp bodies are
        # bare expressions: W/UW hide that difference below.
        if NodeTy is ast.IfExp:
            def W(x):
                return x

            def UW(x):
                return x
        else:
            def W(x):
                return [x]

            def UW(x):
                return x[0]

        has_static_expr = self.gather(HasStaticExpression, node.test)

        if not has_static_expr:
            return self.generic_visit(node)

        # Already a lone static expression: nothing to split.
        if node.test in self.static_expressions:
            return self.generic_visit(node)

        if not isinstance(node.test, ast.BinOp):
            return self.generic_visit(node)

        before, static = [], []
        # Operands in evaluation order (left first) once popped.
        values = [node.test.right, node.test.left]

        def has_static_expression(n):
            return self.gather(HasStaticExpression, n)

        # Partition the operands: leading non-static ones, then the
        # static ones, then whatever remains.
        while values and not has_static_expression(values[-1]):
            before.append(values.pop())

        while values and has_static_expression(values[-1]):
            static.append(values.pop())

        after = list(reversed(values))

        test_before = NodeTy(None, None, None)
        if before:
            assert len(before) == 1
            test_before.test = before[0]

        test_static = NodeTy(None, None, None)
        if static:
            test_static.test = static[0]
            if len(static) > 1:
                # Fold any extra static operand back into `after` so the
                # recursion can split it again later.
                if after:
                    assert len(after) == 1
                    after = [ast.BinOp(static[1], node.test.op, after[0])]
                else:
                    after = static[1:]

        test_after = NodeTy(None, None, None)
        if after:
            assert len(after) == 1
            test_after.test = after[0]

        if isinstance(node.test.op, ast.BitAnd):
            # x & y: later operands only matter when the earlier ones
            # hold, so chain the tests through the `body` branches.
            if after:
                test_after.body = deepcopy(node.body)
                test_after.orelse = deepcopy(node.orelse)
                test_after = W(test_after)
            else:
                test_after = deepcopy(node.body)

            if static:
                test_static.body = test_after
                test_static.orelse = deepcopy(node.orelse)
                test_static = W(test_static)
            else:
                test_static = test_after

            if before:
                test_before.body = test_static
                test_before.orelse = node.orelse
                node = test_before
            else:
                node = UW(test_static)
        elif isinstance(node.test.op, ast.BitOr):
            # x | y: fall through to the next test in the `orelse`
            # branches instead.
            if after:
                test_after.body = deepcopy(node.body)
                test_after.orelse = deepcopy(node.orelse)
                test_after = W(test_after)
            else:
                test_after = deepcopy(node.orelse)

            if static:
                test_static.body = deepcopy(node.body)
                test_static.orelse = test_after
                test_static = W(test_static)
            else:
                test_static = test_after

            if before:
                test_before.body = deepcopy(node.body)
                test_before.orelse = test_static
                node = test_before
            else:
                node = UW(test_static)
        else:
            raise PythranSyntaxError("operator not supported in a static if",
                                     node)

        self.update = True
        # Recurse: the rebuilt node may still hold mixed conditions.
        return self.generic_visit(node)

    visit_If = visit_IfExp = visit_Cond
|
serge-sans-paille/pythran
|
pythran/transformations/normalize_static_if.py
|
Python
|
bsd-3-clause
| 16,623
|
[
"VisIt"
] |
db9df9b4173fb3e880e6ad75086d443f7e0697a4dcc66ac839948561628f8721
|
#!/usr/bin/env python
# imports
from __future__ import print_function
import os, subprocess, shutil
import _config as config
def run_command(command, logfile='runlog.log'):
    '''runs a process and reports its output and status

    Gets:
        command -> a list of arguments, e.g. ['ls' , '-l']
        logfile -> if not None, it is a file to store the output

    Returns:
        stdout  -> both stdout + stderr, as text
        status  -> bool, the status of the command (False if the return code is != 0)
    '''
    process = subprocess.Popen(command, shell=False,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    stdout, stderr = process.communicate()
    # stderr is merged into stdout by stderr=subprocess.STDOUT above.
    assert stderr is None, stderr

    # On Python 3, communicate() returns bytes, but every caller treats
    # the result as text ('ERROR' in stdout, stdout.split('\n'),
    # writing it to a text-mode file).  Decode once at this boundary.
    # On Python 2, bytes is str, so this is a no-op.
    if not isinstance(stdout, str):
        stdout = stdout.decode('utf-8', errors='replace')

    if logfile:
        loglines = []
        loglines.append('running: {}\n'.format(' '.join(command)))
        loglines.append(stdout)
        loglines.append('='*80 + '\n\n')
        with open(logfile, 'a') as f:
            f.writelines(loglines)

    status = False
    if process.returncode is None:
        print ('the process is still running ...')
    elif process.returncode != 0:
        print ('error: non-zero return code occured - check the log file')
    else:
        status = True

    return stdout, status
# ===================================================================
# NAMD
# ===================================================================
def run_namd(k, logfile):
    """Write a NAMD configuration for system *k* and run namd2 on it.

    Returns (output, ok) where *output* is the combined stdout/stderr
    of namd2 and *ok* reports whether it exited cleanly.
    """
    system = config.systems[k]
    parameter_lines = '\n'.join('parameters %s' % p for p in system['pars'])
    namd_conf = config.namd_conf % (system['psf'], system['pdb'],
                                    parameter_lines)

    # namd2 reads its configuration from the 'conf' file in the cwd.
    with open('conf', 'w') as conf_file:
        conf_file.writelines([namd_conf])

    namd_binary = '%s/namd2' % config.paths['namd']
    output, ok = run_command([namd_binary, '+p1', 'conf'], logfile)
    if not ok:
        print(output)
        return output, False
    return output, True
def parse_namd_output(output):
    """Extract the step-0 energy terms from NAMD's stdout.

    Returns a dict mapping term names to floats (kcal/mol); empty when
    no step-0 ENERGY line is present.
    """
    # Column order of the values following 'ENERGY: 0' in NAMD output.
    field_names = ('bond', 'angle', 'dihedral', 'improper', 'coul', 'vdw',
                   'boundary', 'misc', 'kinetic', 'total', 'temp',
                   'potential')
    result = {}
    for line in output.split('\n'):
        if not line.startswith('ENERGY:'):
            continue
        fields = line.split()
        if fields[1] != '0':
            continue
        values = [float(v) for v in fields[2:]]
        for position, name in enumerate(field_names):
            result[name] = values[position]
    return result
# ===================================================================
# PSF2TOP
# ===================================================================
def run_psf2top(k, logfile):
    """Convert the PSF of system *k* into a GROMACS topology via psf2top.

    Returns (output, ok); *ok* is False on a bad exit code or when the
    tool printed 'ERROR' despite exiting cleanly.
    """
    command = ['%s' % config.paths['psf2top'],
               '-p', config.systems[k]['psf'],
               '-c'] + config.systems[k]['pars'] + ['-v']

    output, ok = run_command(command, logfile)
    # psf2top may report problems on stdout without a failing exit code.
    if not ok or 'ERROR' in output:
        print(output)
        return output, False
    return output, True
# ===================================================================
# GROMACS
# ===================================================================
def run_gromacs(k, logfile, mode):
    """Pre-process with grompp and re-evaluate the PDB with `mdrun -rerun`.

    *mode* selects the single- or double-precision GROMACS build; the
    topology is expected in 'top.top' in the current directory.
    Returns (output, ok).
    """
    assert mode in ('single', 'double')
    if mode == 'single':
        binary_suffix, path_key = '', 'gromacs_single'
    else:
        binary_suffix, path_key = '_d', 'gromacs_double'
    grompp = '%s/grompp%s' % (config.paths[path_key], binary_suffix)
    mdrun = '%s/mdrun%s' % (config.paths[path_key], binary_suffix)

    pdb = config.systems[k]['pdb']
    with open('mdp.mdp', 'w') as mdp_file:
        mdp_file.writelines([config.gmx_mdp])

    output, ok = run_command([grompp,
                              '-p', 'top.top',
                              '-f', 'mdp.mdp',
                              '-c', pdb,
                              '-o', 'topol.tpr'], logfile)
    if not ok:
        print(ok)
        return output, False

    output, ok = run_command([mdrun,
                              '-nt', '1',
                              '-s', 'topol.tpr',
                              '-rerun', pdb,
                              '-g', 'gromacs.log'], logfile)
    if not ok:
        print(output)
        return output, False
    return output, True
def parse_gromacs_output():
    """Parse 'gromacs.log' (written by `mdrun -g`) in the current
    directory and return the step-0 energy terms in kcal/mol.

    Returns an empty dict when no step-0 energy block is found.
    """
    logname = 'gromacs.log'

    result = {}
    with open(logname) as f:
        lines = f.readlines()

    energy_lines = []
    for i, line in enumerate(lines):
        line = line.strip()
        # Locate the 'Step ...' header for step 0; the 'Energies' line
        # follows at a version-dependent offset, hence the two probes.
        # NOTE(review): assumes the energies fit in 6 lines (3 pairs of
        # title/value rows) and only warns otherwise.
        if line.startswith('Step') and lines[i+1].strip().startswith('0'):
            if lines[i+3].strip().startswith('Energies'):
                energy_lines = lines[i+4: i+10]
                if lines[i+11].strip() != '':
                    print('warning: the gromacs energies are in more than 6 lines.')
                break
            elif lines[i+4].strip().startswith('Energies'):
                energy_lines = lines[i+5: i+11]
                if lines[i+12].strip() != '':
                    print('warning: the gromacs energies are in more than 6 lines.')
                break
            else:
                raise RuntimeError("could not parse gromacs log")

    if energy_lines != []:
        # Collapse multi-word column titles into single tokens so the
        # title/value rows can be paired by whitespace splitting below.
        for i, line in enumerate(energy_lines):
            energy_lines[i] = energy_lines[i].replace('Proper Dih.', 'Proper_dih')
            energy_lines[i] = energy_lines[i].replace('Improper Dih.', 'Improper_dih')
            energy_lines[i] = energy_lines[i].replace('CMAP Dih.', 'CMAP_dih')
            energy_lines[i] = energy_lines[i].replace('LJ (SR)', 'LJ(SR)')
            energy_lines[i] = energy_lines[i].replace('Coulomb (SR)', 'Coulomb(SR)')
            energy_lines[i] = energy_lines[i].replace('Kinetic En.', 'Kinetic')
            energy_lines[i] = energy_lines[i].replace('Total Energy', 'TotalEnergy')
            energy_lines[i] = energy_lines[i].replace('Pressure (bar)', 'Pressure(bar)')

        vmap = dict(
            bond = 0,
            angle = 0,
            dihedral = 0,
            improper = 0,
            vdw = 0,
            coul = 0,
            kinetic = 0,
            total = 0,
            potential =0,
        )

        # GROMACS reports kJ/mol; convert to kcal/mol to match NAMD.
        _conv = lambda x: float(x) / 4.184

        # Rows alternate: titles on even lines, values on odd lines.
        for i in range(0, len(energy_lines), 2):
            titles = energy_lines[i].split()
            values = energy_lines[i+1].split()
            assert len(titles) == len(values), energy_lines[i]
            for j, ty in enumerate(titles):
                if ty == 'Bond':
                    vmap['bond'] = _conv(values[j])
                elif ty == 'U-B':
                    # Urey-Bradley term maps onto the 'angle' bucket.
                    vmap['angle'] = _conv(values[j])
                elif ty in ('Proper_dih', 'CMAP_dih'):
                    vmap['dihedral'] += _conv(values[j])
                elif ty == 'Improper_dih':
                    vmap['improper'] = _conv(values[j])
                elif ty in ('LJ(SR)', 'LJ-14'):
                    vmap['vdw'] += _conv(values[j])
                elif ty in ('Coulomb(SR)', 'Coulomb-14'):
                    vmap['coul'] += _conv(values[j])
                elif ty == 'Kinetic':
                    vmap['kinetic'] = _conv(values[j])
                elif ty == 'Potential':
                    vmap['potential'] = _conv(values[j])
                elif ty == 'TotalEnergy':
                    vmap['total'] = _conv(values[j])

        result = vmap
    return result
# ===================================================================
# MAIN function
# ===================================================================
def main():
    """Run GROMACS (single & double precision) and NAMD on every enabled
    system, collect their step-0 energies and print a comparison.

    Creates one sub-directory per system with 'gromacs_single',
    'gromacs_double' and 'namd' work folders inside it.
    """
    logfile = os.path.abspath('runlog.log')
    # start from a fresh log on every run
    if os.path.exists(logfile):
        os.remove(logfile)

    maindir = os.getcwd()

    systems = config.systems
    systems_keys = list(systems.keys())
    systems_keys.sort()

    print(' ')
    for k in systems_keys:
        # should we skip this test ?
        if k not in config.run_systems:
            continue

        # if the output directory for the test `k` still exists, remove it
        if os.path.exists(k):
            shutil.rmtree(k)

        # get the absolute paths so the files stay reachable after chdir
        systems[k]['psf'] = os.path.abspath(systems[k]['psf'])
        systems[k]['pdb'] = os.path.abspath(systems[k]['pdb'])
        for i, m in enumerate(systems[k]['pars']):
            systems[k]['pars'][i] = os.path.abspath(m)

        print(('running system: %s' % k))

        # make a subfolder
        os.mkdir(k)
        os.chdir(k)

        # single precision ------------------------------------------
        # make gromacs folder
        os.mkdir('gromacs_single')
        os.chdir('gromacs_single')

        # make the gromacs topology
        # NOTE(review): the early returns below leave the process
        # chdir'ed inside the test folder on failure.
        output, ok = run_psf2top(k, logfile)
        if not ok:
            print('An error occured when using psf2top... exiting.')
            return

        # run gromacs
        output, ok = run_gromacs(k, logfile, mode='single')
        if not ok:
            print('An error occured when running gromacs... exiting.')
            return
        else:
            result = parse_gromacs_output()
            systems[k]['gromacs_single_result'] = result

        # go one level up
        os.chdir('..')

        # double precision ------------------------------------------
        # make gromacs folder
        os.mkdir('gromacs_double')
        os.chdir('gromacs_double')

        # make the gromacs topology
        output, ok = run_psf2top(k, logfile)
        if not ok:
            print('An error occured when using psf2top... exiting.')
            return

        # run gromacs
        output, ok = run_gromacs(k, logfile, mode='double')
        if not ok:
            print('An error occured when running gromacs... exiting.')
            return
        else:
            result = parse_gromacs_output()
            systems[k]['gromacs_double_result'] = result

        # go one level up
        os.chdir('..')

        # make namd folder
        os.mkdir('namd')
        os.chdir('namd')

        # run namd
        output, ok = run_namd(k, logfile)
        if not ok:
            print('An error occured when running namd... exiting.')
            return
        else:
            with open('namd.out', 'w') as f:
                f.writelines([output])
            result = parse_namd_output(output)
            systems[k]['namd_result'] = result

        # go one level up
        os.chdir('..')

        # go back to the main directory
        os.chdir(maindir)

    summarize_test_outputs(systems)
def summarize_test_outputs(systems):
    '''
    Print the NAMD/GROMACS comparison tables on stdout.

    Gets:
        config.systems with the energies e.g.:
           {
             '101': {
                'psf': '...',
                'gromacs_result' : {},
                'namd_result'  : {},
             }
           }
    '''
    print(' ')
    system_keys = list(systems.keys())
    system_keys.sort()

    # short summary -----------------------------
    print('\n\n')
    desc = 'Table 1. Summary of the rmsd of potential terms between GROMACS 4.6.3 and NAMD 2.9.'
    desc += ' Single and double correspond to the single and double-precision versions of GROMCAS.'
    desc += ' Energies are in kcal/mol.'
    print(desc)

    print('-' * 26 + ' ' + '-' * 17 + ' ' + '-' * 17)
    print('{:12s} {:6s} {:4s} {:^12s} {:^12s}'.format(' ','natoms', 'ff', 'GMX-NAMD (double)', 'GMX-NAMD (single)'))
    print('-' * 26 + ' ' + '-' * 17 + ' ' + '-' * 17)
    for k in system_keys:
        if k not in config.run_systems:
            continue
        sys = systems[k]  # NOTE(review): shadows the stdlib `sys` module name locally
        potnamd = 0
        potgmx_double = 0
        potgmx_single = 0
        s = '%12s %6d %4s ' % (sys['name'], sys['natoms'], sys['ff'] )
        # accumulate squared GROMACS-NAMD deviations over the six terms
        for m in ('bond', 'angle', 'dihedral', 'improper', 'coul', 'vdw'):
            namd = systems[k]['namd_result'][m]
            gromacs_double = systems[k]['gromacs_double_result'][m]
            gromacs_single = systems[k]['gromacs_single_result'][m]
            #potnamd += namd
            potgmx_double += (gromacs_double-namd)**2
            potgmx_single += (gromacs_single-namd)**2
        # rmsd over the 6 energy terms
        diff_double = (potgmx_double / 6.)**0.5
        diff_single = (potgmx_single / 6.)**0.5
        # percent-difference columns are currently disabled (potnamd is
        # never accumulated); both branches yield empty strings
        if potnamd == 0:
            pdiff_double = ''
            pdiff_single = ''
        else:
            pdiff_double = ''
            pdiff_single = ''
            # pdiff_double = abs( ((potgmx_double-potnamd)/potnamd) * 100.0 )
            # pdiff_double = '%5.2f' % (pdiff_double)
            # pdiff_single = abs( ((potgmx_single-potnamd)/potnamd) * 100.0 )
            # pdiff_single = '%5.2f' % (pdiff_single)
        s += '{:16.3f} {:16.3f}'.format(diff_double, diff_single)
        print(s)
    print('-' * 26 + ' ' + '-' * 17 + ' ' + '-' * 17)
    print(' ')

    # detail summary ----------------------------
    desc = 'Table 2. Detail of the NAMD and GROMACS energies (kca/mol) for select systems.\n'
    print(desc)
    for k in system_keys:
        if k not in config.run_systems:
            continue
        print('-' * 59)
        heading = '%s - %s \n' % (k, systems[k]['info'])
        heading+= '{:10s} {:>10s} {:>10s} {:>9s} {:>9s} {:>10s} {:>9s} {:>9s}'.format(
            '', 'NAMD', 'GMX (double)', 'GMX-NAMD', '% |diff|', 'GMX (single)', 'GMX-NAMD', '% |diff|')
        print(heading)
        for m in ('bond', 'angle', 'dihedral', 'improper', 'coul', 'vdw'):
            namd = systems[k]['namd_result'][m]
            gromacs_double = systems[k]['gromacs_double_result'][m]
            gromacs_single = systems[k]['gromacs_single_result'][m]
            diff_double = gromacs_double - namd
            diff_single = gromacs_single - namd
            # guard against division by zero for terms NAMD reports as 0
            if namd == 0:
                pdiff_double = 'NA'
                pdiff_single = 'NA'
            else:
                pdiff_double = abs( ((gromacs_double-namd)/namd) * 100.0 )
                pdiff_double = '%5.3f' % (pdiff_double)
                pdiff_single = abs( ((gromacs_single-namd)/namd) * 100.0 )
                pdiff_single = '%5.3f' % (pdiff_single)
            result = '%10s %10.2f %10.2f %9.4f %9s %10.2f %9.4f %9s' % (
                m, namd, gromacs_double, diff_double, pdiff_double, gromacs_single, diff_single, pdiff_single)
            print(result)
        print(' ')
    print('-' * 59)
# Script entry point: run the full comparison when executed directly.
if __name__ == '__main__':
    main()
|
resal81/PyTopol
|
test/systems/cmp_namd_gmx.py
|
Python
|
gpl-3.0
| 14,655
|
[
"Gromacs",
"NAMD"
] |
75c31c3d88d8ef18a399bd573747d8c71dade8591241b5b7c0f92adfeadbb97e
|
# Benchmark the calculation of Ramachandran phi/psi angles from a PDB file
import time
import MDAnalysis as mda
# Test structure (PDB 1AKE, adenylate kinase); the Universe is built
# once at import time and shared by ramachandran() below.
pdb_filepath = "data/1AKE.pdb"
u = mda.Universe(pdb_filepath)
def ramachandran():
    """Compute backbone phi/psi dihedral angles for every residue of `u`.

    Returns (phi_angles, psi_angles): two lists of dihedral values in
    degrees.  Residues for which a selection cannot be built or is None
    (e.g. chain termini) are skipped.
    """
    phi_angles = []
    psi_angles = []
    for res in u.residues:
        # phi_selection()/psi_selection() may raise for terminal or
        # incomplete residues.  Catch Exception rather than using a
        # bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit.
        try:
            phi = res.phi_selection()
        except Exception:
            pass
        else:
            if phi is not None:
                phi_angles.append(phi.dihedral.value())
        try:
            psi = res.psi_selection()
        except Exception:
            pass
        else:
            if psi is not None:
                psi_angles.append(psi.dihedral.value())
    return phi_angles, psi_angles
# Benchmark a single full phi/psi pass.  perf_counter is monotonic and
# has much higher resolution than time.time() for short measurements.
start = time.perf_counter()
ramachandran()
end = time.perf_counter()
print(end - start)
|
jgreener64/pdb-benchmarks
|
MDAnalysis/ramachandran.py
|
Python
|
mit
| 744
|
[
"MDAnalysis"
] |
21cf340adaea3c38984b870b63c317de66db6ce9b1eb36cc659d7e12993d4919
|
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import urllib
import cgi
from invenio.config import \
CFG_CERN_SITE, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_URL, \
CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS, \
CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS, \
CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS, \
CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SITE_RECORD
from invenio.access_control_config import CFG_EXTERNAL_AUTH_USING_SSO, \
CFG_EXTERNAL_AUTH_LOGOUT_SSO, CFG_WEB_API_KEY_STATUS, \
CFG_OPENID_PROVIDERS, CFG_OAUTH2_PROVIDERS, \
CFG_OAUTH1_PROVIDERS, CFG_OPENID_AUTHENTICATION, \
CFG_OAUTH2_AUTHENTICATION, CFG_OAUTH1_AUTHENTICATION
from invenio.urlutils import make_canonical_urlargd, create_url, create_html_link
from invenio.htmlutils import escape_html, nmtoken_from_string
from invenio.messages import gettext_set_language, language_list_long
from invenio.websession_config import CFG_WEBSESSION_GROUP_JOIN_POLICY
class Template:
def tmpl_back_form(self, ln, message, url, link):
    """
    A standard one-message-go-back-link page.

    Parameters:

      - 'ln' *string* - The language to display the interface in

      - 'message' *string* - The message to display

      - 'url' *string* - The url to go back to

      - 'link' *string* - The link text
    """
    out = """
<table>
<tr>
<td align="left">%(message)s
<a href="%(url)s">%(link)s</a></td>
</tr>
</table>
"""% {
        'message' : message,
        'url' : url,
        'link' : link,
        'ln' : ln  # not referenced by the template; kept for interface symmetry
    }
    return out
def tmpl_external_setting(self, ln, key, value):
    """Render a single external-account setting as a two-cell table row."""
    _ = gettext_set_language(ln)  # not used below; kept for API consistency
    out = """
<tr>
<td align="right"><strong>%s:</strong></td>
<td><i>%s</i></td>
</tr>""" % (key, value)
    return out
def tmpl_external_user_settings(self, ln, html_settings):
    """Wrap pre-rendered external-settings rows (*html_settings*) in the
    settings section and add a pointer to the external groups page."""
    _ = gettext_set_language(ln)
    out = """
<p><big><strong class="headline">%(external_user_settings)s</strong></big></p>
<table>
%(html_settings)s
</table>
<p><big><strong class="headline">%(external_user_groups)s</strong></big></p>
<p>%(consult_external_groups)s</p>
""" % {
        'external_user_settings' : _('External account settings'),
        'html_settings' : html_settings,
        'consult_external_groups' : _('You can consult the list of your external groups directly in the %(x_url_open)sgroups page%(x_url_close)s.') % {
            'x_url_open' : '<a href="../yourgroups/display?ln=%s#external_groups">' % ln,
            'x_url_close' : '</a>'
        },
        'external_user_groups' : _('External user groups'),
    }
    return out
def tmpl_user_api_key(self, ln=CFG_SITE_LANG, keys_info=None):
    """
    Displays all the API keys that the user owns.

    Parameters:

      - 'ln' *string* - The language to display the interface in

      - 'keys_info' *tuples* - Contains the tuples with the key data (id, description, status)
    """
    # load the right message language
    _ = gettext_set_language(ln)

    out = """
<script type="text/javascript">
$(document).ready(function(){
$(".key_value").hide();
$(".key_label").click(function(){
$(this).next(".key_value").slideToggle("slow");
});
});
</script>
<p><big><strong class="headline">%(user_api_key)s</strong></big></p>
""" % {
        'user_api_key' : _("API keys")
    }

    if keys_info and len(keys_info) != 0:
        out += "<p>%(user_keys)s</p>" % {'user_keys': _("These are your current API keys")}
        out += """
<table>
"""
        for key_info in keys_info:
            # Markup fixes: the anchor *name* must not contain '#'
            # (href="#0" targets name="0", so name="#0" never matched),
            # and '<code/>' is not a valid opening tag for the
            # '</code>' that follows.
            out += """
<tr><td>%(key_description)s</td>
<td>%(key_status)s</td>
</tr><tr>
<td class = "key_label">
<a name="%(index)s" href="#%(index)s"> %(key_label)s</a>
</td>
<td class="key_value"><code>%(key_id)s</code></td>
</tr><tr>
<td></td>
<td align="left">
<form method="post" action="%(sitesecureurl)s/youraccount/apikey" name="api_key_remove">
<input type="hidden" name="key_id" value="%(key_id)s" />
<code class="blocknote"><input class="formbutton" type="%(input_type)s" value="%(remove_key)s" /></code>
</form>
</td>
</tr>
""" % {
                # NOTE(review): passing dynamic strings through _() will
                # not be found in the translation catalogue; they fall
                # back to the input unchanged.
                'key_description': _("Description: " + cgi.escape(key_info[1])),
                'key_status': _("Status: " + key_info[2]),
                'key_id': _(key_info[0]),
                'index': keys_info.index(key_info),
                'key_label': _("API key"),
                'remove_key' : _("Delete key"),
                'sitesecureurl': CFG_SITE_SECURE_URL,
                # revoked keys get a hidden input instead of a delete button
                'input_type': ("submit", "hidden")[key_info[2] == CFG_WEB_API_KEY_STATUS['REVOKED']]
            }
        out += "</table>"

    out += """
<form method="post" action="%(sitesecureurl)s/youraccount/apikey" name="api_key_create">
<p>%(create_new_key)s</p>
<table>
<tr><td align="right" valign="top"><strong>
<label for="new_key_description">%(new_key_description_label)s:</label></strong><br />
<small class="important">(%(mandatory)s)</small>
</td><td valign="top">
<input type="text" size="50" name="key_description" id="key_description" value=""/><br />
<small><span class="quicknote">%(note)s:</span>
%(new_key_description_note)s
</small>
</td>
</tr>
<tr><td></td><td align="left">
<code class="blocknote"><input class="formbutton" type="submit" value="%(create_new_key_button)s" /></code>
</td></tr>
</table>
</form>
""" % {
        'create_new_key' : _("If you want to create a new API key, please enter a description for it"),
        'new_key_description_label' : _("Description for the new API key"),
        'mandatory' : _("mandatory"),
        'note' : _("Note"),
        'new_key_description_note': _("The description should be something meaningful for you to recognize the API key"),
        'create_new_key_button' : _("Create new key"),
        'sitesecureurl': CFG_SITE_SECURE_URL
    }

    return out
def tmpl_user_preferences(self, ln, email, email_disabled, password_disabled, nickname):
    """
    Displays a form for the user to change his email/password.

    Parameters:

      - 'ln' *string* - The language to display the interface in

      - 'email' *string* - The email of the user

      - 'email_disabled' *boolean* - If the user has the right to edit his email

      - 'password_disabled' *boolean* - If the user has the right to edit his password

      - 'nickname' *string* - The nickname of the user (empty string if user does not have it)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = """
<p><big><strong class="headline">%(edit_params)s</strong></big></p>
<form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_logins_settings">
<p>%(change_user)s</p>
<table>
<tr><td align="right" valign="top"><strong>
<label for="nickname">%(nickname_label)s:</label></strong><br />
<small class="important">(%(mandatory)s)</small>
</td><td valign="top">
%(nickname_prefix)s%(nickname)s%(nickname_suffix)s<br />
<small><span class="quicknote">%(note)s:</span>
%(fixed_nickname_note)s
</small>
</td>
</tr>
<tr><td align="right"><strong>
<label for="email">%(new_email)s:</label></strong><br />
<small class="important">(%(mandatory)s)</small>
</td><td>
<input type="text" size="25" name="email" id="email" %(email_disabled)s value="%(email)s" /><br />
<small><span class="quicknote">%(example)s:</span>
<span class="example">john.doe@example.com</span>
</small>
</td>
</tr>
<tr><td></td><td align="left">
<input class="formbutton" type="submit" value="%(set_values)s" />
</td></tr>
</table>
<input type="hidden" name="action" value="edit" />
</form>
""" % {
        'change_user' : _("If you want to change your email or set for the first time your nickname, please set new values in the form below."),
        'edit_params' : _("Edit login credentials"),
        'nickname_label' : _("Nickname"),
        'nickname' : nickname,
        # An editable input is rendered only while the nickname is still
        # unset; once chosen it is displayed as plain text.
        'nickname_prefix' : nickname=='' and '<input type="text" size="25" name="nickname" id="nickname" value=""' or '',
        'nickname_suffix' : nickname=='' and '" /><br /><small><span class="quicknote">'+_("Example")+':</span><span class="example">johnd</span></small>' or '',
        'new_email' : _("New email address"),
        'mandatory' : _("mandatory"),
        'example' : _("Example"),
        'note' : _("Note"),
        'set_values' : _("Set new values"),
        'email' : email,
        'email_disabled' : email_disabled and "readonly" or "",
        'sitesecureurl': CFG_SITE_SECURE_URL,
        'fixed_nickname_note' : _('Since this is considered as a signature for comments and reviews, once set it can not be changed.')
    }

    # The password form is only shown for locally-managed accounts.
    if not password_disabled and not CFG_EXTERNAL_AUTH_USING_SSO:
        out += """
<form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_password">
<p>%(change_pass)s</p>
<table>
<tr>
<td align="right"><strong><label for="old_password">%(old_password)s:</label></strong><br />
</td><td align="left">
<input type="password" size="25" name="old_password" id="old_password" %(password_disabled)s /><br />
<small><span class="quicknote">%(note)s:</span>
%(old_password_note)s
</small>
</td>
</tr>
<tr>
<td align="right"><strong><label for="new_password">%(new_password)s:</label></strong><br />
</td><td align="left">
<input type="password" size="25" name="password" id="new_password" %(password_disabled)s /><br />
<small><span class="quicknote">%(note)s:</span>
%(password_note)s
</small>
</td>
</tr>
<tr>
<td align="right"><strong><label for="new_password2">%(retype_password)s:</label></strong></td>
<td align="left">
<input type="password" size="25" name="password2" id="new_password2" %(password_disabled)s value="" />
</td>
</tr>
<tr><td></td><td align="left">
<input class="formbutton" type="submit" value="%(set_values)s" />
</td></tr>
</table>
<input type="hidden" name="action" value="edit" />
</form>
""" % {
            'change_pass' : _("If you want to change your password, please enter the old one and set the new value in the form below."),
            'mandatory' : _("mandatory"),
            'old_password' : _("Old password"),
            'new_password' : _("New password"),
            'optional' : _("optional"),
            'note' : _("Note"),
            'password_note' : _("The password phrase may contain punctuation, spaces, etc."),
            'old_password_note' : _("You must fill the old password in order to set a new one."),
            'retype_password' : _("Retype password"),
            'set_values' : _("Set new password"),
            'password_disabled' : password_disabled and "disabled" or "",
            'sitesecureurl': CFG_SITE_SECURE_URL,
        }
    elif not CFG_EXTERNAL_AUTH_USING_SSO and CFG_CERN_SITE:
        # CERN lightweight accounts: link to the external reset page,
        # returning the user to this edit page afterwards.
        out += "<p>" + _("""If you are using a lightweight CERN account you can
%(x_url_open)sreset the password%(x_url_close)s.""") % \
            {'x_url_open' : \
             '<a href="http://cern.ch/LightweightRegistration/ResetPassword.aspx%s">' \
             % (make_canonical_urlargd({'email': email, 'returnurl' : CFG_SITE_SECURE_URL + '/youraccount/edit' + make_canonical_urlargd({'lang' : ln}, {})}, {})), 'x_url_close' : '</a>'} + "</p>"
    elif CFG_EXTERNAL_AUTH_USING_SSO and CFG_CERN_SITE:
        # SSO accounts are managed entirely by the CERN account system.
        out += "<p>" + _("""You can change or reset your CERN account password by means of the %(x_url_open)sCERN account system%(x_url_close)s.""") % \
            {'x_url_open' : '<a href="https://cern.ch/login/password.aspx">', 'x_url_close' : '</a>'} + "</p>"
    return out
def tmpl_user_bibcatalog_auth(self, bibcatalog_username="", bibcatalog_password="", ln=CFG_SITE_LANG):
    """template for setting username and pw for bibcatalog backend"""
    # load the right message language
    _ = gettext_set_language(ln)
    # NOTE(review): the current password is echoed back into the form's
    # value attribute, exposing it in the page source.
    out = """
<form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_bibcatalog_settings">
<p><big><strong class="headline">%(edit_bibcatalog_settings)s</strong></big></p>
<table>
<tr>
<td> %(username)s: <input type="text" size="25" name="bibcatalog_username" value="%(bibcatalog_username)s" id="bibcatuid"></td>
<td> %(password)s: <input type="password" size="25" name="bibcatalog_password" value="%(bibcatalog_password)s" id="bibcatpw"></td>
</tr>
<tr>
<td><input class="formbutton" type="submit" value="%(update_settings)s" /></td>
</tr>
</table></form>
""" % {
        'sitesecureurl' : CFG_SITE_SECURE_URL,
        'bibcatalog_username' : bibcatalog_username,
        'bibcatalog_password' : bibcatalog_password,
        'edit_bibcatalog_settings' : _("Edit cataloging interface settings"),
        'username' : _("Username"),
        'password' : _("Password"),
        'update_settings' : _('Update settings')
    }
    return out
    def tmpl_user_lang_edit(self, ln, preferred_lang):
        """
        Displays a form to select the preferred language of the interface.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'preferred_lang' *string* - Short code of the currently preferred language
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """
          <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_lang_settings">
          <p><big><strong class="headline">%(edit_lang_settings)s</strong></big></p>
          <table>
            <tr><td align="right"><select name="lang" id="lang">
        """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'edit_lang_settings' : _("Edit language-related settings"),
        }
        # one <option> per available interface language; the currently
        # preferred one is pre-selected
        for short_ln, long_ln in language_list_long():
            out += """<option %(selected)s value="%(short_ln)s">%(long_ln)s</option>""" % {
                'selected' : preferred_lang == short_ln and 'selected="selected"' or '',
                'short_ln' : short_ln,
                'long_ln' : escape_html(long_ln)
            }
        out += """</select></td><td valign="top"><strong><label for="lang">%(select_lang)s</label></strong></td></tr>
          <tr><td></td><td><input class="formbutton" type="submit" value="%(update_settings)s" /></td></tr>
          </table></form>""" % {
          'select_lang' : _('Select desired language of the web interface.'),
          'update_settings' : _('Update settings')
        }
        return out
    def tmpl_user_profiling_settings(self, ln, enable_profiling):
        """
        Displays a form to enable or disable profiling for the user.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'enable_profiling' *boolean* - Whether profiling is currently enabled
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """
          <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_profiling_settings">
          <p><big><strong class="headline">%(edit_settings)s</strong></big></p>
          <table>
            <tr><td align="right"><select name="profiling">
        """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'edit_settings' : _("Edit profiling settings"),
        }
        # "Disabled" option (value 0), pre-selected when profiling is off
        out += """<option %(selected)s value="0">%(desc)s</option>""" % {
            'selected' : 'selected="selected"' if enable_profiling is False else '',
            'desc' : _("Disabled")
        }
        # "Enabled" option (value 1), pre-selected when profiling is on
        out += """<option %(selected)s value="1">%(desc)s</option>""" % {
            'selected' : 'selected="selected"' if enable_profiling is True else '',
            'desc' : _("Enabled")
        }
        out += """</select></td><td valign="top"></td></tr>
          <tr><td></td><td><input class="formbutton" type="submit" value="%(update_settings)s" /></td></tr>
          </table></form>""" % {
          'update_settings' : _('Update settings')
        }
        return out
    def tmpl_user_websearch_edit(self, ln, current = 10, show_latestbox = True, show_helpbox = True):
        """
        Displays a form to edit the search-related settings of the user.

        Parameters:

          - 'ln' *string* - The language to display the interface in

          - 'current' *int* - Currently selected number of search results per page

          - 'show_latestbox' *boolean* - Whether the "latest additions" box is shown

          - 'show_helpbox' *boolean* - Whether the collection help boxes are shown
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """
          <form method="post" action="%(sitesecureurl)s/youraccount/change" name="edit_websearch_settings">
          <p><big><strong class="headline">%(edit_websearch_settings)s</strong></big></p>
          <table>
            <tr><td align="right"><input type="checkbox" %(checked_latestbox)s value="1" name="latestbox" id="latestbox"/></td>
            <td valign="top"><b><label for="latestbox">%(show_latestbox)s</label></b></td></tr>
            <tr><td align="right"><input type="checkbox" %(checked_helpbox)s value="1" name="helpbox" id="helpbox"/></td>
            <td valign="top"><b><label for="helpbox">%(show_helpbox)s</label></b></td></tr>
            <tr><td align="right"><select name="group_records" id="group_records">
        """ % {
          'sitesecureurl' : CFG_SITE_SECURE_URL,
          'edit_websearch_settings' : _("Edit search-related settings"),
          'show_latestbox' : _("Show the latest additions box"),
          'checked_latestbox' : show_latestbox and 'checked="checked"' or '',
          'show_helpbox' : _("Show collection help boxes"),
          'checked_helpbox' : show_helpbox and 'checked="checked"' or '',
        }
        # offer only page sizes within the site-wide configured maximum
        for i in 10, 25, 50, 100, 250, 500:
            if i <= CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS:
                out += """<option %(selected)s>%(i)s</option>
                """ % {
                  'selected' : current == i and 'selected="selected"' or '',
                  'i' : i
                }
        out += """</select></td><td valign="top"><strong><label for="group_records">%(select_group_records)s</label></strong></td></tr>
          <tr><td></td><td><input class="formbutton" type="submit" value="%(update_settings)s" /></td></tr>
          </table>
            </form>""" % {
              'update_settings' : _("Update settings"),
              'select_group_records' : _("Number of search results per page"),
        }
        return out
    def tmpl_user_external_auth(self, ln, methods, current, method_disabled):
        """
        Displays a form for the user to change his authentication method.
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'methods' *array* - The methods of authentication
        - 'method_disabled' *boolean* - If the user has the right to change this
        - 'current' *string* - The currently selected method
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """
         <form method="post" action="%(sitesecureurl)s/youraccount/change">
           <big><strong class="headline">%(edit_method)s</strong></big>
           <p>%(explain_method)s:</p>
           <table>
             <tr><td valign="top"><b>%(select_method)s:</b></td><td>
        """ % {
          'edit_method' : _("Edit login method"),
          'explain_method' : _("Please select which login method you would like to use to authenticate yourself"),
          'select_method' : _("Select method"),
          'sitesecureurl': CFG_SITE_SECURE_URL,
        }
        # one radio button per available method; all disabled when the user
        # is not allowed to change the login method
        for system in methods:
            out += """<input type="radio" name="login_method" value="%(system)s" id="%(id)s" %(disabled)s %(selected)s /><label for="%(id)s">%(system)s</label><br />""" % {
                     'system' : system,
                     'disabled' : method_disabled and 'disabled="disabled"' or "",
                     'selected' : current == system and 'checked="checked"' or "",
                     'id' : nmtoken_from_string(system),
                   }
        out += """  </td></tr>
                   <tr><td>&nbsp;</td>
                     <td><input class="formbutton" type="submit" value="%(select_method)s" /></td></tr></table>
                    </form>""" % {
                     'select_method' : _("Select method"),
                   }
        return out
    def tmpl_lost_password_form(self, ln):
        """
        Displays a form for the user to ask for his password sent by email.
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'msg' *string* - Explicative message on top of the form.
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = "<p>" + _("If you have lost the password for your %(sitename)s %(x_fmt_open)sinternal account%(x_fmt_close)s, then please enter your email address in the following form in order to have a password reset link emailed to you.") % {'x_fmt_open' : '<em>', 'x_fmt_close' : '</em>', 'sitename' : CFG_SITE_NAME_INTL[ln]} + "</p>"
        out += """
          <blockquote>
          <form  method="post" action="../youraccount/send_email">
          <table>
                <tr>
              <td align="right"><strong><label for="p_email">%(email)s:</label></strong></td>
              <td><input type="text" size="25" name="p_email" id="p_email" value="" />
                  <input type="hidden" name="ln" value="%(ln)s" />
                  <input type="hidden" name="action" value="lost" />
              </td>
            </tr>
            <tr><td>&nbsp;</td>
              <td><input class="formbutton" type="submit" value="%(send)s" /></td>
            </tr>
          </table>
          </form>
          </blockquote>
          """ % {
            'ln': ln,
            'email' : _("Email address"),
            'send' : _("Send password reset link"),
          }
        # site-specific hints for external (CERN SSO) accounts; otherwise a
        # generic note about external login systems
        # NOTE(review): the "<p>" opened in the CERN branch is never closed
        # with "</p>" — confirm whether this is intentional.
        if CFG_CERN_SITE:
            out += "<p>" + _("If you have been using the %(x_fmt_open)sCERN login system%(x_fmt_close)s, then you can recover your password through the %(x_url_open)sCERN authentication system%(x_url_close)s.") % {'x_fmt_open' : '<em>', 'x_fmt_close' : '</em>', 'x_url_open' : '<a href="https://cern.ch/lightweightregistration/ResetPassword.aspx%s">' \
                % make_canonical_urlargd({'lf': 'auth', 'returnURL' : CFG_SITE_SECURE_URL + '/youraccount/login?ln='+ln}, {}), 'x_url_close' : '</a>'} + " "
        else:
            out += "<p>" + _("Note that if you have been using an external login system, then we cannot do anything and you have to ask there.") + " "
            out += _("Alternatively, you can ask %s to change your login system from external to internal.") % ("""<a href="mailto:%(email)s">%(email)s</a>""" % { 'email' : CFG_SITE_SUPPORT_EMAIL }) + "</p>"
        return out
    def tmpl_account_info(self, ln, uid, guest, CFG_CERN_SITE):
        """
        Displays the account information
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'uid' *string* - The user id
        - 'guest' *boolean* - If the user is guest
        - 'CFG_CERN_SITE' *boolean* - If the site is a CERN site
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """<p>%(account_offer)s</p>
              <blockquote>
              <dl>
              """ % {
                'account_offer' : _("%s offers you the possibility to personalize the interface, to set up your own personal library of documents, or to set up an automatic alert query that would run periodically and would notify you of search results by email.") % CFG_SITE_NAME_INTL[ln],
              }
        # "Your Settings" is only offered to logged-in users
        if not guest:
            out += """
                   <dt>
                   <a href="./edit?ln=%(ln)s">%(your_settings)s</a>
                   </dt>
                   <dd>%(change_account)s</dd>""" % {
                     'ln' : ln,
                     'your_settings' : _("Your Settings"),
                     'change_account' : _("Set or change your account email address or password. Specify your preferences about the look and feel of the interface.")
                   }
        out += """
        <dt><a href="../youralerts/display?ln=%(ln)s">%(your_searches)s</a></dt>
        <dd>%(search_explain)s</dd>""" % {
          'ln' : ln,
          'your_searches' : _("Your Searches"),
          'search_explain' : _("View all the searches you performed during the last 30 days."),
        }
        out += """
        <dt><a href="../yourbaskets/display?ln=%(ln)s">%(your_baskets)s</a></dt>
        <dd>%(basket_explain)s""" % {
          'ln' : ln,
          'your_baskets' : _("Your Baskets"),
          'basket_explain' : _("With baskets you can define specific collections of items, store interesting records you want to access later or share with others."),
        }
        # NOTE(review): several <dd> elements below are opened here and only
        # closed conditionally further down — verify the generated HTML nests
        # as intended for all guest/CERN combinations.
        if not guest:
            out += """
            <dt><a href="../yourcomments/?ln=%(ln)s">%(your_comments)s</a></dt>
            <dd>%(comments_explain)s""" % {
              'ln' : ln,
              'your_comments' : _("Your Comments"),
              'comments_explain' : _("Display all the comments you have submitted so far."),
            }
        if guest and CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
            out += self.tmpl_warning_guest_user(ln = ln, type = "baskets")
        out += """</dd>
        <dt><a href="../youralerts/list?ln=%(ln)s">%(your_alerts)s</a></dt>
        <dd>%(explain_alerts)s""" % {
          'ln' : ln,
          'your_alerts' : _("Your Alerts"),
          'explain_alerts' : _("Subscribe to a search which will be run periodically by our service. The result can be sent to you via Email or stored in one of your baskets."),
        }
        if guest and CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
            out += self.tmpl_warning_guest_user(type="alerts", ln = ln)
        out += "</dd>"
        # loans are a CERN-specific feature
        if CFG_CERN_SITE:
            out += """</dd>
            <dt><a href="%(CFG_SITE_SECURE_URL)s/yourloans/display?ln=%(ln)s">%(your_loans)s</a></dt>
            <dd>%(explain_loans)s</dd>""" % {
              'your_loans' : _("Your Loans"),
              'explain_loans' : _("Check out book you have on loan, submit borrowing requests, etc. Requires CERN ID."),
              'ln': ln,
              'CFG_SITE_SECURE_URL': CFG_SITE_SECURE_URL
            }
        out += """
        </dl>
        </blockquote>"""
        return out
def tmpl_warning_guest_user(self, ln, type):
"""
Displays a warning message about the specified type
Parameters:
- 'ln' *string* - The language to display the interface in
- 'type' *string* - The type of data that will get lost in case of guest account (for the moment: 'alerts' or 'baskets')
"""
# load the right message language
_ = gettext_set_language(ln)
if (type=='baskets'):
msg = _("You are logged in as a guest user, so your baskets will disappear at the end of the current session.") + ' '
elif (type=='alerts'):
msg = _("You are logged in as a guest user, so your alerts will disappear at the end of the current session.") + ' '
msg += _("If you wish you can %(x_url_open)slogin or register here%(x_url_close)s.") % {'x_url_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/login?ln=' + ln + '">',
'x_url_close': '</a>'}
return """<table class="errorbox" summary="">
<tr>
<th class="errorboxheader">%s</th>
</tr>
</table>""" % msg
def tmpl_account_body(self, ln, user):
"""
Displays the body of the actions of the user
Parameters:
- 'ln' *string* - The language to display the interface in
- 'user' *string* - The username (nickname or email)
"""
# load the right message language
_ = gettext_set_language(ln)
out = _("You are logged in as %(x_user)s. You may want to a) %(x_url1_open)slogout%(x_url1_close)s; b) edit your %(x_url2_open)saccount settings%(x_url2_close)s.") %\
{'x_user': user,
'x_url1_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/logout?ln=' + ln + '">',
'x_url1_close': '</a>',
'x_url2_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/edit?ln=' + ln + '">',
'x_url2_close': '</a>',
}
return out + "<br /><br />"
def tmpl_account_template(self, title, body, ln, url):
"""
Displays a block of the your account page
Parameters:
- 'ln' *string* - The language to display the interface in
- 'title' *string* - The title of the block
- 'body' *string* - The body of the block
- 'url' *string* - The URL to go to the proper section
"""
out ="""
<table class="youraccountbox" width="90%%" summary="" >
<tr>
<th class="youraccountheader"><a href="%s">%s</a></th>
</tr>
<tr>
<td class="youraccountbody">%s</td>
</tr>
</table>""" % (url, title, body)
return out
    def tmpl_account_page(self, ln, warnings, warning_list, accBody, baskets, alerts, searches, messages, loans, groups, submissions, approvals, tickets, administrative, comments):
        """
        Displays the your account page
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'accBody' *string* - The body of the heading block
        - 'baskets' *string* - The body of the baskets block
        - 'alerts' *string* - The body of the alerts block
        - 'searches' *string* - The body of the searches block
        - 'messages' *string* - The body of the messages block
        - 'groups' *string* - The body of the groups block
        - 'submissions' *string* - The body of the submission block
        - 'approvals' *string* - The body of the approvals block
        - 'administrative' *string* - The body of the administrative block
        - 'comments' *string* - The body of the comments block
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = ""
        # warnings flag arrives as the string "1", not a boolean
        if warnings == "1":
            out += self.tmpl_general_warnings(warning_list)
        out += self.tmpl_account_template(_("Your Account"), accBody, ln, '/youraccount/edit?ln=%s' % ln)
        # each section below is rendered only when its body/flag is truthy
        if messages:
            out += self.tmpl_account_template(_("Your Messages"), messages, ln, '/yourmessages/display?ln=%s' % ln)
        if loans:
            out += self.tmpl_account_template(_("Your Loans"), loans, ln, '/yourloans/display?ln=%s' % ln)
        if baskets:
            out += self.tmpl_account_template(_("Your Baskets"), baskets, ln, '/yourbaskets/display?ln=%s' % ln)
        if comments:
            comments_description = _("You can consult the list of %(x_url_open)syour comments%(x_url_close)s submitted so far.")
            comments_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourcomments/?ln=' + ln + '">',
                                     'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Comments"), comments_description, ln, '/yourcomments/?ln=%s' % ln)
        if alerts:
            out += self.tmpl_account_template(_("Your Alert Searches"), alerts, ln, '/youralerts/list?ln=%s' % ln)
        if searches:
            out += self.tmpl_account_template(_("Your Searches"), searches, ln, '/youralerts/display?ln=%s' % ln)
        if groups:
            groups_description = _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s you are administering or are a member of.")
            groups_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourgroups/display?ln=' + ln + '">',
                                   'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Groups"), groups_description, ln, '/yourgroups/display?ln=%s' % ln)
        if submissions:
            submission_description = _("You can consult the list of %(x_url_open)syour submissions%(x_url_close)s and inquire about their status.")
            submission_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yoursubmissions.py?ln=' + ln + '">',
                                       'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Submissions"), submission_description, ln, '/yoursubmissions.py?ln=%s' % ln)
        if approvals:
            approval_description = _("You can consult the list of %(x_url_open)syour approvals%(x_url_close)s with the documents you approved or refereed.")
            approval_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourapprovals.py?ln=' + ln + '">',
                                     'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Approvals"), approval_description, ln, '/yourapprovals.py?ln=%s' % ln)
        #check if this user might have tickets
        if tickets:
            ticket_description = _("You can consult the list of %(x_url_open)syour tickets%(x_url_close)s.")
            ticket_description %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourtickets?ln=' + ln + '">',
                                   'x_url_close': '</a>'}
            out += self.tmpl_account_template(_("Your Tickets"), ticket_description, ln, '/yourtickets?ln=%s' % ln)
        if administrative:
            out += self.tmpl_account_template(_("Your Administrative Activities"), administrative, ln, '/admin')
        return out
def tmpl_account_emailMessage(self, ln, msg):
"""
Displays a link to retrieve the lost password
Parameters:
- 'ln' *string* - The language to display the interface in
- 'msg' *string* - Explicative message on top of the form.
"""
# load the right message language
_ = gettext_set_language(ln)
out =""
out +="""
<body>
%(msg)s <a href="../youraccount/lost?ln=%(ln)s">%(try_again)s</a>
</body>
""" % {
'ln' : ln,
'msg' : msg,
'try_again' : _("Try again")
}
return out
def tmpl_account_reset_password_email_body(self, email, reset_key, ip_address, ln=CFG_SITE_LANG):
"""
The body of the email that sends lost internal account
passwords to users.
"""
_ = gettext_set_language(ln)
out = """
%(intro)s
%(intro2)s
<%(link)s>
%(outro)s
%(outro2)s""" % {
'intro': _("Somebody (possibly you) coming from %(x_ip_address)s "
"has asked\nfor a password reset at %(x_sitename)s\nfor "
"the account \"%(x_email)s\"." % {
'x_sitename' :CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
'x_email' : email,
'x_ip_address' : ip_address,
}
),
'intro2' : _("If you want to reset the password for this account, please go to:"),
'link' : "%s/youraccount/access%s" %
(CFG_SITE_SECURE_URL, make_canonical_urlargd({
'ln' : ln,
'mailcookie' : reset_key
}, {})),
'outro' : _("in order to confirm the validity of this request."),
'outro2' : _("Please note that this URL will remain valid for about %(days)s days only.") % {'days': CFG_WEBSESSION_RESET_PASSWORD_EXPIRE_IN_DAYS},
}
return out
def tmpl_account_address_activation_email_body(self, email, address_activation_key, ip_address, ln=CFG_SITE_LANG):
"""
The body of the email that sends email address activation cookie
passwords to users.
"""
_ = gettext_set_language(ln)
out = """
%(intro)s
%(intro2)s
<%(link)s>
%(outro)s
%(outro2)s""" % {
'intro': _("Somebody (possibly you) coming from %(x_ip_address)s "
"has asked\nto register a new account at %(x_sitename)s\nfor the "
"email address \"%(x_email)s\"." % {
'x_sitename' :CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
'x_email' : email,
'x_ip_address' : ip_address,
}
),
'intro2' : _("If you want to complete this account registration, please go to:"),
'link' : "%s/youraccount/access%s" %
(CFG_SITE_SECURE_URL, make_canonical_urlargd({
'ln' : ln,
'mailcookie' : address_activation_key
}, {})),
'outro' : _("in order to confirm the validity of this request."),
'outro2' : _("Please note that this URL will remain valid for about %(days)s days only.") % {'days' : CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS},
}
return out
def tmpl_account_emailSent(self, ln, email):
"""
Displays a confirmation message for an email sent
Parameters:
- 'ln' *string* - The language to display the interface in
- 'email' *string* - The email to which the message has been sent
"""
# load the right message language
_ = gettext_set_language(ln)
out =""
out += _("Okay, a password reset link has been emailed to %s.") % email
return out
def tmpl_account_delete(self, ln):
"""
Displays a confirmation message about deleting the account
Parameters:
- 'ln' *string* - The language to display the interface in
"""
# load the right message language
_ = gettext_set_language(ln)
out = "<p>" + _("""Deleting your account""") + '</p>'
return out
    def tmpl_account_logout(self, ln):
        """
        Displays a confirmation message about logging out
        Parameters:
        - 'ln' *string* - The language to display the interface in
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = _("You are no longer recognized by our system.") + ' '
        # on SSO sites the local logout does not end the central SSO session,
        # so also offer the SSO logout link
        if CFG_EXTERNAL_AUTH_USING_SSO and CFG_EXTERNAL_AUTH_LOGOUT_SSO:
            out += _("""You are still recognized by the centralized
                %(x_fmt_open)sSSO%(x_fmt_close)s system. You can
                %(x_url_open)slogout from SSO%(x_url_close)s, too.""") % \
                {'x_fmt_open' : '<strong>', 'x_fmt_close' : '</strong>',
                 'x_url_open' : '<a href="%s">' % CFG_EXTERNAL_AUTH_LOGOUT_SSO,
                 'x_url_close' : '</a>'}
            out += '<br />'
        out += _("If you wish you can %(x_url_open)slogin here%(x_url_close)s.") % \
            {'x_url_open': '<a href="./login?ln=' + ln + '">',
             'x_url_close': '</a>'}
        return out
def tmpl_login_form(self, ln, referer, internal, register_available, methods, selected_method, msg=None):
"""
Displays a login form
Parameters:
- 'ln' *string* - The language to display the interface in
- 'referer' *string* - The referer URL - will be redirected upon after login
- 'internal' *boolean* - If we are producing an internal authentication
- 'register_available' *boolean* - If users can register freely in the system
- 'methods' *array* - The available authentication methods
- 'selected_method' *string* - The default authentication method
- 'msg' *string* - The message to print before the form, if needed
"""
# load the right message language
_ = gettext_set_language(ln)
out = "<div style='float:left'>"
if msg is "":
out += "<p>%(please_login)s</p>" % {
'please_login' : cgi.escape(_("If you already have an account, please login using the form below."))
}
if CFG_CERN_SITE:
out += "<p>" + _("If you don't own a CERN account yet, you can register a %(x_url_open)snew CERN lightweight account%(x_url_close)s.") % {'x_url_open' : '<a href="https://www.cern.ch/lightweightregistration/RegisterAccount.aspx">', 'x_url_close' : '</a>'} + "</p>"
else:
if register_available:
out += "<p>"+_("If you don't own an account yet, please %(x_url_open)sregister%(x_url_close)s an internal account.") %\
{'x_url_open': '<a href="../youraccount/register?ln=' + ln + '">',
'x_url_close': '</a>'} + "</p>"
else:
# users cannot register accounts, so advise them
# how to get one, or be silent about register
# facility if account level is more than 4:
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 5:
out += "<p>" + _("If you don't own an account yet, please contact %s.") % ('<a href="mailto:%s">%s</a>' % (cgi.escape(CFG_SITE_SUPPORT_EMAIL, True), cgi.escape(CFG_SITE_SUPPORT_EMAIL))) + "</p>"
else:
out += "<p>%s</p>" % msg
out += """<form method="post" action="%(CFG_SITE_SECURE_URL)s/youraccount/login">
<table>
""" % {'CFG_SITE_SECURE_URL': CFG_SITE_SECURE_URL}
if len(methods) - CFG_OPENID_AUTHENTICATION - CFG_OAUTH2_AUTHENTICATION - CFG_OAUTH1_AUTHENTICATION > 1:
# more than one method, must make a select
login_select = """<select name="login_method" id="login_method">"""
for method in methods:
# OpenID/OAuth shouldn't be shown in this list.
if not method in ['openid', 'oauth1', 'oauth2']:
login_select += """<option value="%(method)s" %(selected)s>%(method)s</option>""" % {
'method' : cgi.escape(method, True),
'selected' : (method == selected_method and 'selected="selected"' or "")
}
login_select += "</select>"
out += """
<tr>
<td align="right"><strong><label for="login_method">%(login_title)s</label></strong></td>
<td>%(login_select)s</td>
</tr>""" % {
'login_title' : cgi.escape(_("Login method:")),
'login_select' : login_select,
}
else:
# only one login method available
out += """<input type="hidden" name="login_method" value="%s" />""" % cgi.escape(methods[0], True)
out += """<tr>
<td align="right">
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="referer" value="%(referer)s" />
<strong><label for="p_un">%(username)s:</label></strong>
</td>
<td><input type="text" size="25" name="p_un" id="p_un" value="" /></td>
</tr>
<tr>
<td align="right"><strong><label for="p_pw">%(password)s:</label></strong></td>
<td align="left"><input type="password" size="25" name="p_pw" id="p_pw" value="" /></td>
</tr>
<tr>
<td></td>
<td align="left"><input type="checkbox" name="remember_me" id="remember_me"/><em><label for="remember_me">%(remember_me)s</label></em></td>
<tr>
<td></td>
<td align="center" colspan="3"><input class="formbutton" type="submit" name="action" value="%(login)s" />""" % {
'ln': cgi.escape(ln, True),
'referer' : cgi.escape(referer, True),
'username' : cgi.escape(_("Username")),
'password' : cgi.escape(_("Password")),
'remember_me' : cgi.escape(_("Remember login on this computer.")),
'login' : cgi.escape(_("login")),
}
if internal:
out += """ (<a href="./lost?ln=%(ln)s">%(lost_pass)s</a>)""" % {
'ln' : cgi.escape(ln, True),
'lost_pass' : cgi.escape(_("Lost your password?"))
}
out += """</td>
</tr>
</table></form>"""
out += """<p><strong>%(note)s:</strong> %(note_text)s</p>""" % {
'note' : cgi.escape(_("Note")),
'note_text': cgi.escape(_("You can use your nickname or your email address to login."))}
out += "</div>"
if CFG_OPENID_AUTHENTICATION or \
CFG_OAUTH2_AUTHENTICATION or \
CFG_OAUTH1_AUTHENTICATION:
# If OpenID or OAuth authentication is enabled, we put the login
# forms of providers.
out += self.tmpl_external_login_panel(ln, referer)
return out
def tmpl_lost_your_password_teaser(self, ln=CFG_SITE_LANG):
"""Displays a short sentence to attract user to the fact that
maybe he lost his password. Used by the registration page.
"""
_ = gettext_set_language(ln)
out = ""
out += """<a href="./lost?ln=%(ln)s">%(maybe_lost_pass)s</a>""" % {
'ln' : ln,
'maybe_lost_pass': ("Maybe you have lost your password?")
}
return out
def tmpl_reset_password_form(self, ln, email, reset_key, msg=''):
"""Display a form to reset the password."""
_ = gettext_set_language(ln)
out = ""
out = "<p>%s</p>" % _("Your request is valid. Please set the new "
"desired password in the following form.")
if msg:
out += """<p class='warning'>%s</p>""" % msg
out += """
<form method="post" action="../youraccount/resetpassword?ln=%(ln)s">
<input type="hidden" name="k" value="%(reset_key)s" />
<input type="hidden" name="e" value="%(email)s" />
<input type="hidden" name="reset" value="1" />
<table>
<tr><td align="right"><strong>%(set_password_for)s</strong>:</td><td><em>%(email)s</em></td></tr>
<tr><td align="right"><strong><label for="password">%(type_new_password)s:</label></strong></td>
<td><input type="password" name="password" id="password" value="123" /></td></tr>
<tr><td align="right"><strong><label for="password2">%(type_it_again)s:</label></strong></td>
<td><input type="password" name="password2" id="password2" value="" /></td></tr>
<tr><td align="center" colspan="2">
<input class="formbutton" type="submit" name="action" value="%(set_new_password)s" />
</td></tr>
</table>
</form>""" % {
'ln' : ln,
'reset_key' : reset_key,
'email' : email,
'set_password_for' : _('Set a new password for'),
'type_new_password' : _('Type the new password'),
'type_it_again' : _('Type again the new password'),
'set_new_password' : _('Set the new password')
}
return out
    def tmpl_register_page(self, ln, referer, level):
        """
        Displays a login form
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'referer' *string* - The referer URL - will be redirected upon after login
        - 'level' *int* - Login level (0 - all access, 1 - accounts activated, 2+ - no self-registration)
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = ""
        # levels 0 and 1 allow self-registration; level 1 additionally
        # requires email verification before the account can be used
        if level <= 1:
            out += _("Please enter your email address and desired nickname and password:")
            if level == 1:
                out += _("It will not be possible to use the account before it has been verified and activated.")
            out += """
<form method="post" action="../youraccount/register">
<input type="hidden" name="referer" value="%(referer)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<table>
  <tr>
   <td align="right"><strong><label for="p_email">%(email_address)s:</label></strong><br /><small class="important">(%(mandatory)s)</small></td>
   <td><input type="text" size="25" name="p_email" id="p_email" value="" /><br />
       <small><span class="quicknote">%(example)s:</span>
       <span class="example">john.doe@example.com</span></small>
   </td>
   <td></td>
  </tr>
  <tr>
   <td align="right"><strong><label for="p_nickname">%(nickname)s:</label></strong><br /><small class="important">(%(mandatory)s)</small></td>
   <td><input type="text" size="25" name="p_nickname" id="p_nickname" value="" /><br />
       <small><span class="quicknote">%(example)s:</span>
       <span class="example">johnd</span></small>
   </td>
   <td></td>
  </tr>
  <tr>
   <td align="right"><strong><label for="p_pw">%(password)s:</label></strong><br /><small class="quicknote">(%(optional)s)</small></td>
   <td align="left"><input type="password" size="25" name="p_pw" id="p_pw" value="" /><br />
      <small><span class="quicknote">%(note)s:</span> %(password_contain)s</small>
   </td>
   <td></td>
  </tr>
  <tr>
   <td align="right"><strong><label for="p_pw2">%(retype)s:</label></strong></td>
   <td align="left"><input type="password" size="25" name="p_pw2" id="p_pw2" value="" /></td>
   <td></td>
  </tr>
  <tr>
    <td></td>
    <td align="left" colspan="3"><input class="formbutton" type="submit" name="action" value="%(register)s" /></td>
  </tr>
</table>
</form>
<p><strong>%(note)s:</strong> %(explain_acc)s""" % {
              'referer' : cgi.escape(referer),
              'ln' : cgi.escape(ln),
              'email_address' : _("Email address"),
              'nickname' : _("Nickname"),
              'password' : _("Password"),
              'mandatory' : _("mandatory"),
              'optional' : _("optional"),
              'example' : _("Example"),
              'note' : _("Note"),
              'password_contain' : _("The password phrase may contain punctuation, spaces, etc."),
              'retype' : _("Retype Password"),
              'register' : _("register"),
              'explain_acc' : _("Please do not use valuable passwords such as your Unix, AFS or NICE passwords with this service. Your email address will stay strictly confidential and will not be disclosed to any third party. It will be used to identify you for personal services of %s. For example, you may set up an automatic alert search that will look for new preprints and will notify you daily of new arrivals by email.") % CFG_SITE_NAME,
            }
        else:
            # level >=2, so users cannot register accounts
            out += "<p>" + _("It is not possible to create an account yourself. Contact %s if you want an account.") % ('<a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL)) + "</p>"
        return out
def tmpl_account_adminactivities(self, ln, uid, guest, roles, activities):
"""
Displays the admin activities block for this user
Parameters:
- 'ln' *string* - The language to display the interface in
- 'uid' *string* - The used id
- 'guest' *boolean* - If the user is guest
- 'roles' *array* - The current user roles
- 'activities' *array* - The user allowed activities
"""
# load the right message language
_ = gettext_set_language(ln)
out = ""
# guest condition
if guest:
return _("You seem to be a guest user. You have to %(x_url_open)slogin%(x_url_close)s first.") % \
{'x_url_open': '<a href="' + CFG_SITE_SECURE_URL + '/youraccount/login?ln=' + ln + '">',
'x_url_close': '<a/>'}
# no rights condition
if not roles:
return "<p>" + _("You are not authorized to access administrative functions.") + "</p>"
# displaying form
out += "<p>" + _("You are enabled to the following roles: %(x_role)s.") % {'x_role': ('<em>' + ", ".join(roles) + "</em>")} + '</p>'
if activities:
# print proposed links:
activities.sort(lambda x, y: cmp(x.lower(), y.lower()))
tmp_out = ''
for action in activities:
if action == "runbibedit":
tmp_out += """<br /> <a href="%s/%s/edit/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Record Editor"))
if action == "runbibeditmulti":
tmp_out += """<br /> <a href="%s/%s/multiedit/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Multi-Record Editor"))
if action == "runauthorlist":
tmp_out += """<br /> <a href="%s/authorlist/">%s</a>""" % (CFG_SITE_URL, _("Run Author List Manager"))
if action == "runbibcirculation":
tmp_out += """<br /> <a href="%s/admin/bibcirculation/bibcirculationadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run BibCirculation"))
if action == "runbibmerge":
tmp_out += """<br /> <a href="%s/%s/merge/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run Record Merger"))
if action == "runbibswordclient":
tmp_out += """<br /> <a href="%s/%s/bibsword/">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, _("Run BibSword Client"))
if action == "runbatchuploader":
tmp_out += """<br /> <a href="%s/batchuploader/metadata?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run Batch Uploader"))
if action == "cfgbibformat":
tmp_out += """<br /> <a href="%s/admin/bibformat/bibformatadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibFormat"))
if action == "cfgbibknowledge":
tmp_out += """<br /> <a href="%s/kb?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibKnowledge"))
if action == "cfgoaiharvest":
tmp_out += """<br /> <a href="%s/admin/oaiharvest/oaiharvestadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure OAI Harvest"))
if action == "cfgoairepository":
tmp_out += """<br /> <a href="%s/admin/oairepository/oairepositoryadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure OAI Repository"))
if action == "cfgbibindex":
tmp_out += """<br /> <a href="%s/admin/bibindex/bibindexadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibIndex"))
if action == "cfgbibrank":
tmp_out += """<br /> <a href="%s/admin/bibrank/bibrankadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibRank"))
if action == "cfgwebaccess":
tmp_out += """<br /> <a href="%s/admin/webaccess/webaccessadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebAccess"))
if action == "cfgwebcomment":
tmp_out += """<br /> <a href="%s/admin/webcomment/webcommentadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebComment"))
if action == "cfgweblinkback":
tmp_out += """<br /> <a href="%s/admin/weblinkback/weblinkbackadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebLinkback"))
if action == "cfgwebjournal":
tmp_out += """<br /> <a href="%s/admin/webjournal/webjournaladmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebJournal"))
if action == "cfgwebsearch":
tmp_out += """<br /> <a href="%s/admin/websearch/websearchadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebSearch"))
if action == "cfgwebsubmit":
tmp_out += """<br /> <a href="%s/admin/websubmit/websubmitadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure WebSubmit"))
if action == "runbibdocfile":
tmp_out += """<br /> <a href="%s/%s/managedocfiles?ln=%s">%s</a>""" % (CFG_SITE_URL, CFG_SITE_RECORD, ln, _("Run Document File Manager"))
if action == "cfgbibsort":
tmp_out += """<br /> <a href="%s/admin/bibsort/bibsortadmin.py?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Configure BibSort"))
if action == "runinfomanager":
tmp_out += """<br /> <a href="%s/info/manage?ln=%s">%s</a>""" % (CFG_SITE_URL, ln, _("Run Info Space Manager"))
if tmp_out:
out += _("Here are some interesting web admin links for you:") + tmp_out
out += "<br />" + _("For more admin-level activities, see the complete %(x_url_open)sAdmin Area%(x_url_close)s.") %\
{'x_url_open': '<a href="' + CFG_SITE_URL + '/help/admin?ln=' + ln + '">',
'x_url_close': '</a>'}
return out
    def tmpl_create_userinfobox(self, ln, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats):
        """
        Displays the user block shown in the page header: a user icon
        followed by either a login link (guests) or account/logout links
        (logged-in users).
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'url_referer' *string* - URL of the page being displayed;
          propagated to the login link so the user returns here after
          logging in
        - 'guest' *boolean* - If the user is guest
        - 'username' *string* - The username (nickname or email)
        - 'submitter' *boolean* - If the user is submitter
        - 'referee' *boolean* - If the user is referee
        - 'admin' *boolean* - If the user is admin
        - 'usebaskets' *boolean* - If baskets are enabled for the user
        - 'usemessages' *boolean* - If messages are enabled for the user
        - 'usealerts' *boolean* - If alerts are enabled for the user
        - 'usegroups' *boolean* - If groups are enabled for the user
        - 'useloans' *boolean* - If loans are enabled for the user
        - 'usestats' *boolean* - If stats are enabled for the user
        @note: with the update of CSS classes (cds.cds ->
        invenio.css), the variables useloans etc are not used in
        this function, since they are in the menus.  But we keep
        them in the function signature for backwards
        compatibility.
        """
        # load the right message language
        _ = gettext_set_language(ln)
        out = """<img src="%s/img/user-icon-1-20x20.gif" border="0" alt=""/> """ % CFG_SITE_URL
        if guest:
            # Guest: show a login link over the secure URL; keep the
            # (URL-quoted) referer so the user comes back here after login.
            out += """%(guest_msg)s ::
               <a class="userinfo" href="%(sitesecureurl)s/youraccount/login?ln=%(ln)s%(referer)s">%(login)s</a>""" % {
                     'sitesecureurl': CFG_SITE_SECURE_URL,
                     'ln' : ln,
                     'guest_msg' : _("guest"),
                     'referer' : url_referer and ('&referer=%s' % urllib.quote(url_referer)) or '',
                     'login' : _('login')
                     }
        else:
            # Logged-in user: link to the account page plus a logout link.
            out += """
               <a class="userinfo" href="%(sitesecureurl)s/youraccount/display?ln=%(ln)s">%(username)s</a> :: """ % {
                     'sitesecureurl' : CFG_SITE_SECURE_URL,
                     'ln' : ln,
                     'username' : username
                     }
            out += """<a class="userinfo" href="%(sitesecureurl)s/youraccount/logout?ln=%(ln)s">%(logout)s</a>""" % {
                     'sitesecureurl' : CFG_SITE_SECURE_URL,
                     'ln' : ln,
                     'logout' : _("logout"),
                     }
        return out
    def tmpl_create_useractivities_menu(self, ln, selected, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats, usecomments):
        """
        Returns the main navigation menu with actions based on user's
        priviledges
        @param ln: The language to display the interface in
        @type ln: string
        @param selected: If the menu is currently selected
        @type selected: boolean
        @param url_referer: URL of the page being displayed
        @type url_referer: string
        @param guest: If the user is guest
        @type guest: string
        @param username: The username (nickname or email)
        @type username: string
        @param submitter: If the user is submitter
        @type submitter: boolean
        @param referee: If the user is referee
        @type referee: boolean
        @param admin: If the user is admin
        @type admin: boolean
        @param usebaskets: If baskets are enabled for the user
        @type usebaskets: boolean
        @param usemessages: If messages are enabled for the user
        @type usemessages: boolean
        @param usealerts: If alerts are enabled for the user
        @type usealerts: boolean
        @param usegroups: If groups are enabled for the user
        @type usegroups: boolean
        @param useloans: If loans are enabled for the user
        @type useloans: boolean
        @param usestats: If stats are enabled for the user
        @type usestats: boolean
        @param usecomments: If comments are enabled for the user
        @type usecomments: boolean
        @return: html menu of the user activities
        @rtype: string
        @note: url_referer, admin, useloans and usestats are not used to
            build this menu; they are kept in the signature for
            backwards compatibility.
        """
        # load the right message language
        _ = gettext_set_language(ln)
        # Menu header ("Personalize"); highlighted when currently selected.
        out = '''<div class="hassubmenu%(on)s">
              <a hreflang="en" class="header%(selected)s" href="%(CFG_SITE_SECURE_URL)s/youraccount/display?ln=%(ln)s">%(personalize)s</a>
              <ul class="subsubmenu">''' % {
                      'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                      'ln' : ln,
                      'personalize': _("Personalize"),
                      'on': selected and " on" or '',
                      'selected': selected and "selected" or ''
                      }
        # Each entry below is rendered only when the corresponding feature
        # is enabled for (or role applies to) the current user.  Note that
        # alerts/baskets links are also shown to guests.
        if not guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/youraccount/display?ln=%(ln)s">%(account)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'account' : _('Your account')
                   }
        if usealerts or guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/youralerts/list?ln=%(ln)s">%(alerts)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'alerts' : _('Your alerts')
                   }
        if referee:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourapprovals.py?ln=%(ln)s">%(approvals)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'approvals' : _('Your approvals')
                   }
        if usebaskets or guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourbaskets/display?ln=%(ln)s">%(baskets)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'baskets' : _('Your baskets')
                   }
        if usecomments:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourcomments?ln=%(ln)s">%(comments)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'comments' : _('Your comments')
                   }
        if usegroups:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourgroups/display?ln=%(ln)s">%(groups)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'groups' : _('Your groups')
                   }
        if useloans:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourloans/display?ln=%(ln)s">%(loans)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'loans' : _('Your loans')
                   }
        if usemessages:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yourmessages/display?ln=%(ln)s">%(messages)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'messages' : _('Your messages')
                   }
        if submitter:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/yoursubmissions.py?ln=%(ln)s">%(submissions)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'submissions' : _('Your submissions')
                   }
        if usealerts or guest:
            out += '<li><a href="%(CFG_SITE_SECURE_URL)s/youralerts/display?ln=%(ln)s">%(searches)s</a></li>' % {
                       'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
                       'ln' : ln,
                       'searches' : _('Your searches')
                   }
        out += '</ul></div>'
        return out
def tmpl_create_adminactivities_menu(self, ln, selected, url_referer, guest, username, submitter, referee, admin, usebaskets, usemessages, usealerts, usegroups, useloans, usestats, activities):
"""
Returns the main navigation menu with actions based on user's
priviledges
@param ln: The language to display the interface in
@type ln: string
@param selected: If the menu is currently selected
@type selected: boolean
@param url_referer: URL of the page being displayed
@type url_referer: string
@param guest: If the user is guest
@type guest: string
@param username: The username (nickname or email)
@type username: string
@param submitter: If the user is submitter
@type submitter: boolean
@param referee: If the user is referee
@type referee: boolean
@param admin: If the user is admin
@type admin: boolean
@param usebaskets: If baskets are enabled for the user
@type usebaskets: boolean
@param usemessages: If messages are enabled for the user
@type usemessages: boolean
@param usealerts: If alerts are enabled for the user
@type usealerts: boolean
@param usegroups: If groups are enabled for the user
@type usegroups: boolean
@param useloans: If loans are enabled for the user
@type useloans: boolean
@param usestats: If stats are enabled for the user
@type usestats: boolean
@param activities: dictionary of admin activities
@rtype activities: dict
@return: html menu of the user activities
@rtype: string
"""
# load the right message language
_ = gettext_set_language(ln)
out = ''
if activities:
out += '''<div class="hassubmenu%(on)s">
<a hreflang="en" class="header%(selected)s" href="%(CFG_SITE_SECURE_URL)s/youraccount/youradminactivities?ln=%(ln)s">%(admin)s</a>
<ul class="subsubmenu">''' % {
'CFG_SITE_SECURE_URL' : CFG_SITE_SECURE_URL,
'ln' : ln,
'admin': _("Administration"),
'on': selected and " on" or '',
'selected': selected and "selected" or ''
}
for name in sorted(activities.iterkeys()):
url = activities[name]
out += '<li><a href="%(url)s">%(name)s</a></li>' % {
'url': url,
'name': name
}
if usestats:
out += """<li><a href="%(CFG_SITE_URL)s/stats/?ln=%(ln)s">%(stats)s</a></li>""" % {
'CFG_SITE_URL' : CFG_SITE_URL,
'ln' : ln,
'stats' : _("Statistics"),
}
out += '</ul></div>'
return out
def tmpl_warning(self, warnings, ln=CFG_SITE_LANG):
"""
Display len(warnings) warning fields
@param infos: list of strings
@param ln=language
@return: html output
"""
if not((type(warnings) is list) or (type(warnings) is tuple)):
warnings = [warnings]
warningbox = ""
if warnings != []:
warningbox = "<div class=\"warningbox\">\n <b>Warning:</b>\n"
for warning in warnings:
lines = warning.split("\n")
warningbox += " <p>"
for line in lines[0:-1]:
warningbox += line + " <br />\n"
warningbox += lines[-1] + " </p>"
warningbox += "</div><br />\n"
return warningbox
def tmpl_error(self, error, ln=CFG_SITE_LANG):
"""
Display error
@param error: string
@param ln=language
@return: html output
"""
_ = gettext_set_language(ln)
errorbox = ""
if error != "":
errorbox = "<div class=\"errorbox\">\n <b>Error:</b>\n"
errorbox += " <p>"
errorbox += error + " </p>"
errorbox += "</div><br />\n"
return errorbox
def tmpl_display_all_groups(self,
infos,
admin_group_html,
member_group_html,
external_group_html = None,
warnings=[],
ln=CFG_SITE_LANG):
"""
Displays the 3 tables of groups: admin, member and external
Parameters:
- 'ln' *string* - The language to display the interface in
- 'admin_group_html' *string* - HTML code for displaying all the groups
the user is the administrator of
- 'member_group_html' *string* - HTML code for displaying all the groups
the user is member of
- 'external_group_html' *string* - HTML code for displaying all the
external groups the user is member of
"""
_ = gettext_set_language(ln)
group_text = self.tmpl_infobox(infos)
group_text += self.tmpl_warning(warnings)
if external_group_html:
group_text += """
<table>
<tr>
<td>%s</td>
</tr>
<tr>
<td><br />%s</td>
</tr>
<tr>
<td><br /><a name='external_groups'></a>%s</td>
</tr>
</table>""" %(admin_group_html, member_group_html, external_group_html)
else:
group_text += """
<table>
<tr>
<td>%s</td>
</tr>
<tr>
<td><br />%s</td>
</tr>
</table>""" %(admin_group_html, member_group_html)
return group_text
    def tmpl_display_admin_groups(self, groups, ln=CFG_SITE_LANG):
        """
        Display the groups the user is admin of.
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'groups' *list* - All the group the user is admin of, as
          (grpID, name, description) tuples
        """
        _ = gettext_set_language(ln)
        # Template for the per-row action icons (edit group / edit members).
        img_link = """
        <a href="%(siteurl)s/yourgroups/%(action)s?grpID=%(grpID)s&ln=%(ln)s">
        <img src="%(siteurl)s/img/%(img)s" alt="%(text)s" style="border:0" width="25"
        height="25" /><br /><small>%(text)s</small>
        </a>"""
        out = self.tmpl_group_table_title(img="/img/group_admin.png",
                                          text=_("You are an administrator of the following groups:") )
        out += """
<table class="mailbox">
  <thead class="mailboxheader">
    <tr class="inboxheader">
      <td>%s</td>
      <td>%s</td>
      <td style="width: 20px;" > </td>
      <td style="width: 20px;"> </td>
    </tr>
  </thead>
  <tfoot>
    <tr style="height:0px;">
      <td></td>
      <td></td>
      <td></td>
      <td></td>
    </tr>
  </tfoot>
  <tbody class="mailboxbody">""" %(_("Group"), _("Description"))
        # Placeholder row when the user administers no group.
        if len(groups) == 0:
            out += """
    <tr class="mailboxrecord" style="height: 100px;">
      <td colspan="4" style="text-align: center;">
        <small>%s</small>
      </td>
    </tr>""" %(_("You are not an administrator of any groups."),)
        # One row per administered group, with edit/members action icons.
        for group_data in groups:
            (grpID, name, description) = group_data
            edit_link = img_link % {'siteurl' : CFG_SITE_URL,
                                    'grpID' : grpID,
                                    'ln': ln,
                                    'img':"webbasket_create_small.png",
                                    'text':_("Edit group"),
                                    'action':"edit"
                                    }
            members_link = img_link % {'siteurl' : CFG_SITE_URL,
                                       'grpID' : grpID,
                                       'ln': ln,
                                       'img':"webbasket_usergroup.png",
                                       'text':_("Edit %s members") % '',
                                       'action':"members"
                                       }
            out += """
    <tr class="mailboxrecord">
      <td>%s</td>
      <td>%s</td>
      <td style="text-align: center;" >%s</td>
      <td style="text-align: center;" >%s</td>
    </tr>""" % (cgi.escape(name), cgi.escape(description), edit_link, members_link)
        # Footer row: "create new group" form.
        out += """
    <tr class="mailboxfooter">
      <td colspan="2">
        <form name="newGroup" action="create?ln=%(ln)s" method="post">
          <input type="submit" name="create_group" value="%(write_label)s" class="formbutton" />
        </form>
      </td>
      <td> </td>
      <td> </td>
      <td> </td>
    </tr>
  </tbody>
</table>""" % {'ln': ln,
               'write_label': _("Create new group"),
               }
        return out
    def tmpl_display_member_groups(self, groups, ln=CFG_SITE_LANG):
        """
        Display the groups the user is member of.
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'groups' *list* - All the group the user is member of, as
          (id, name, description) tuples
        """
        _ = gettext_set_language(ln)
        group_text = self.tmpl_group_table_title(img="/img/webbasket_us.png", text=_("You are a member of the following groups:"))
        group_text += """
<table class="mailbox">
  <thead class="mailboxheader">
    <tr class="inboxheader">
      <td>%s</td>
      <td>%s</td>
    </tr>
  </thead>
  <tfoot>
    <tr style="height:0px;">
      <td></td>
      <td></td>
    </tr>
  </tfoot>
  <tbody class="mailboxbody">""" % (_("Group"), _("Description"))
        # Placeholder row when the user belongs to no group.
        if len(groups) == 0:
            group_text += """
    <tr class="mailboxrecord" style="height: 100px;">
      <td colspan="2" style="text-align: center;">
        <small>%s</small>
      </td>
    </tr>""" %(_("You are not a member of any groups."),)
        # One row per group membership.
        for group_data in groups:
            (id, name, description) = group_data
            group_text += """
    <tr class="mailboxrecord">
      <td>%s</td>
      <td>%s</td>
    </tr>""" % (cgi.escape(name), cgi.escape(description))
        # Footer row: join/leave group forms.
        group_text += """
    <tr class="mailboxfooter">
      <td>
        <form name="newGroup" action="join?ln=%(ln)s" method="post">
          <input type="submit" name="join_group" value="%(join_label)s" class="formbutton" />
        </form>
      </td>
      <td>
        <form name="newGroup" action="leave?ln=%(ln)s" method="post">
          <input type="submit" name="leave" value="%(leave_label)s" class="formbutton" />
        </form>
      </td>
    </tr>
  </tbody>
</table>
 """ % {'ln': ln,
        'join_label': _("Join new group"),
        'leave_label':_("Leave group")
        }
        return group_text
    def tmpl_display_external_groups(self, groups, ln=CFG_SITE_LANG):
        """
        Display the external groups the user is member of.
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'groups' *list* - All the group the user is member of, as
          (id, name, description) tuples
        """
        _ = gettext_set_language(ln)
        group_text = self.tmpl_group_table_title(img="/img/webbasket_us.png", text=_("You are a member of the following external groups:"))
        group_text += """
<table class="mailbox">
  <thead class="mailboxheader">
    <tr class="inboxheader">
      <td>%s</td>
      <td>%s</td>
    </tr>
  </thead>
  <tfoot>
    <tr style="height:0px;">
      <td></td>
      <td></td>
    </tr>
  </tfoot>
  <tbody class="mailboxbody">""" % (_("Group"), _("Description"))
        # Placeholder row when the user belongs to no external group.
        if len(groups) == 0:
            group_text += """
    <tr class="mailboxrecord" style="height: 100px;">
      <td colspan="2" style="text-align: center;">
        <small>%s</small>
      </td>
    </tr>""" %(_("You are not a member of any external groups."),)
        # One row per external group membership.  Unlike the internal-group
        # table, no join/leave forms are offered (membership is managed
        # externally).
        for group_data in groups:
            (id, name, description) = group_data
            group_text += """
    <tr class="mailboxrecord">
      <td>%s</td>
      <td>%s</td>
    </tr>""" % (cgi.escape(name), cgi.escape(description))
        group_text += """
  </tbody>
</table>
"""
        return group_text
    def tmpl_display_input_group_info(self,
                                      group_name,
                                      group_description,
                                      join_policy,
                                      act_type="create",
                                      grpID=None,
                                      warnings=[],
                                      ln=CFG_SITE_LANG):
        """
        Display group data when creating or updating a group:
        Name, description, join_policy.
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'group_name' *string* - name of the group
        - 'group_description' *string* - description of the group
        - 'join_policy' *string* - join policy
        - 'act_type' *string* - info about action : create or edit(update)
        - 'grpID' *int* - ID of the group(not None in case of group editing)
        - 'warnings' *list* - Display warning if values are not correct
        """
        # NOTE(review): 'warnings' uses a mutable default argument; it is
        # never mutated here (only passed to tmpl_warning), but consider
        # switching to None for safety.
        _ = gettext_set_language(ln)
        #default: values for the "create" action
        hidden_id =""
        form_name = "create_group"
        action = CFG_SITE_URL + '/yourgroups/create'
        button_label = _("Create new group")
        button_name = "create_button"
        label = _("Create new group")
        delete_text = ""
        # "update" action reuses the same form with different targets,
        # labels, a hidden grpID field and an extra delete button.
        if act_type == "update":
            form_name = "update_group"
            action = CFG_SITE_URL + '/yourgroups/edit'
            button_label = _("Update group")
            button_name = "update"
            label = _('Edit group %s') % cgi.escape(group_name)
            delete_text = """<input type="submit" value="%s" class="formbutton" name="%s" />"""
            delete_text %= (_("Delete group"),"delete")
            if grpID is not None:
                hidden_id = """<input type="hidden" name="grpID" value="%s" />"""
                hidden_id %= grpID
        out = self.tmpl_warning(warnings)
        out += """
<form name="%(form_name)s" action="%(action)s" method="post">
 <input type="hidden" name="ln" value="%(ln)s" />
 <div style="padding:10px;">
 <table class="bskbasket">
   <thead class="bskbasketheader">
     <tr>
       <td class="bskactions">
         <img src="%(logo)s" alt="%(label)s" />
       </td>
       <td class="bsktitle">
         <b>%(label)s</b><br />
       </td>
     </tr>
   </thead>
   <tfoot>
     <tr><td colspan="2"></td></tr>
   </tfoot>
   <tbody>
     <tr>
       <td colspan="2">
         <table>
           <tr>
             <td><label for="group_name">%(name_label)s</label></td>
             <td>
              <input type="text" name="group_name" id="group_name" value="%(group_name)s" />
             </td>
           </tr>
           <tr>
             <td><label for="group_description">%(description_label)s</label></td>
             <td>
              <input type="text" name="group_description" id="group_description" value="%(group_description)s" />
             </td>
           </tr>
           <tr>
             <td>%(join_policy_label)s</td>
             <td>
              %(join_policy)s
             </td>
           </tr>
         </table>
       </td>
     </tr>
   </tbody>
 </table>
 %(hidden_id)s
 <table>
  <tr>
   <td>
    <input type="submit" value="%(button_label)s" class="formbutton" name="%(button_name)s" />
   </td>
   <td>
    %(delete_text)s
   </td>
   <td>
    <input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
   </td>
  </tr>
 </table>
 </div>
</form>
"""
        # Field values are escaped with cgi.escape(..., 1) so they are safe
        # inside the double-quoted HTML attribute values above.
        out %= {'action' : action,
                'logo': CFG_SITE_URL + '/img/webbasket_create.png',
                'label': label,
                'form_name' : form_name,
                'name_label': _("Group name:"),
                'delete_text': delete_text,
                'description_label': _("Group description:"),
                'join_policy_label': _("Group join policy:"),
                'group_name': cgi.escape(group_name, 1),
                'group_description': cgi.escape(group_description, 1),
                'button_label': button_label,
                'button_name':button_name,
                'cancel_label':_("Cancel"),
                'hidden_id':hidden_id,
                'ln': ln,
                'join_policy' :self.__create_join_policy_selection_menu("join_policy",
                                                                        join_policy,
                                                                        ln)
                }
        return out
    def tmpl_display_input_join_group(self,
                                      group_list,
                                      group_name,
                                      group_from_search,
                                      search,
                                      warnings=[],
                                      ln=CFG_SITE_LANG):
        """
        Display the groups the user can join.
        He can use default select list or the search box
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'group_list' *list* - All the group the user can join
        - 'group_name' *string* - Name of the group the user is looking for
        - 'group_from search' *list* - List of the group the user can join matching group_name
        - 'search' *int* - User is looking for group using group_name
        - 'warnings' *list* - Display warning if two group are selected
        """
        # NOTE(review): 'warnings' uses a mutable default argument; it is
        # never mutated here, but consider switching to None for safety.
        _ = gettext_set_language(ln)
        out = self.tmpl_warning(warnings)
        # Extra table row with the search results, shown only after a
        # "find group" search was performed.
        search_content = ""
        if search:
            search_content = """<tr><td> </td><td>"""
            if group_from_search != []:
                search_content += self.__create_select_menu('grpID', group_from_search, _("Please select:"))
            else:
                search_content += _("No matching group")
            search_content += """</td><td> </td></tr>"""
        out += """
<form name="join_group" action="%(action)s" method="post">
 <input type="hidden" name="ln" value="%(ln)s" />
 <div style="padding:10px;">
 <table class="bskbasket">
   <thead class="bskbasketheader">
     <tr>
       <td class="bskactions">
         <img src="%(logo)s" alt="%(label)s" />
       </td>
       <td class="bsktitle">
         <b>%(label)s</b><br />
       </td>
     </tr>
   </thead>
   <tfoot>
     <tr><td colspan="2"></td></tr>
   </tfoot>
   <tbody>
     <tr>
       <td colspan="2">
         <table>
           <tr>
             <td>%(list_label)s</td>
             <td>
              %(group_list)s
             </td>
             <td>
             </td>
           </tr>
           <tr>
             <td><br /><label for="group_name">%(label2)s</label></td>
             <td><br /><input type="text" name="group_name" id="group_name" value="%(group_name)s" /></td>
             <td><br />
              <input type="submit" name="find_button" value="%(find_label)s" class="nonsubmitbutton" />
             </td>
           </tr>
           %(search_content)s
         </table>
       </td>
     </tr>
   </tbody>
 </table>
 <table>
  <tr>
   <td>
    <input type="submit" name="join_button" value="%(label)s" class="formbutton" />
   </td>
   <td>
    <input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
   </td>
  </tr>
 </table>
 </div>
</form>
"""
        out %= {'action' : CFG_SITE_URL + '/yourgroups/join',
                'logo': CFG_SITE_URL + '/img/webbasket_create.png',
                'label': _("Join group"),
                'group_name': cgi.escape(group_name, 1),
                'label2':_("or find it") + ': ',
                'list_label':_("Choose group:"),
                'ln': ln,
                'find_label': _("Find group"),
                'cancel_label':_("Cancel"),
                'group_list' :self.__create_select_menu("grpID",group_list, _("Please select:")),
                'search_content' : search_content
                }
        return out
    def tmpl_display_manage_member(self,
                                   grpID,
                                   group_name,
                                   members,
                                   pending_members,
                                   infos=[],
                                   warnings=[],
                                   ln=CFG_SITE_LANG):
        """Display current members and waiting members of a group.
        Parameters:
        - 'ln' *string* - The language to display the interface in
        - 'grpID *int* - ID of the group
        - 'group_name' *string* - Name of the group
        - 'members' *list* - List of the current members
        - 'pending_members' *list* - List of the waiting members
        - 'infos' *tuple of 2 lists* - Message to inform user about his last action
        - 'warnings' *list* - Display warning if two group are selected
        """
        # NOTE(review): 'infos' and 'warnings' use mutable default
        # arguments; they are not mutated here, but consider None defaults.
        _ = gettext_set_language(ln)
        out = self.tmpl_warning(warnings)
        out += self.tmpl_infobox(infos)
        # Page skeleton: three stacked boxes (current members, pending
        # members, invitation hint) followed by a cancel button.  The
        # %(...)s placeholders are filled in at the bottom of this method.
        out += """
<form name="member" action="%(action)s" method="post">
 <p>%(title)s</p>
 <input type="hidden" name="ln" value="%(ln)s" />
 <input type="hidden" name="grpID" value="%(grpID)s"/>
 <table>
  <tr>
   <td>
    <table class="bskbasket">
     <thead class="bskbasketheader">
      <tr>
       <td class="bskactions">
        <img src="%(imgurl)s/webbasket_usergroup.png" alt="%(img_alt_header1)s" />
       </td>
       <td class="bsktitle">
        %(header1)s<br />
       </td>
      </tr>
     </thead>
     <tfoot>
      <tr><td colspan="2"></td></tr>
     </tfoot>
     <tbody>
      <tr>
       <td colspan="2">
        <table>
         <tr>
          %(member_text)s
         </tr>
        </table>
       </td>
      </tr>
     </tbody>
    </table>
   </td>
  </tr>
  <tr>
   <td>
    <table class="bskbasket">
     <thead class="bskbasketheader">
      <tr>
       <td class="bskactions">
        <img src="%(imgurl)s/webbasket_usergroup_gray.png" alt="%(img_alt_header2)s" />
       </td>
       <td class="bsktitle">
        %(header2)s<br />
       </td>
      </tr>
     </thead>
     <tfoot>
      <tr><td colspan="2"></td></tr>
     </tfoot>
     <tbody>
      <tr>
       <td colspan="2">
        <table>
         <tr>
          %(pending_text)s
         </tr>
        </table>
       </td>
      </tr>
     </tbody>
    </table>
   </td>
  </tr>
  <tr>
   <td>
    <table class="bskbasket" style="width: 400px">
     <thead class="bskbasketheader">
      <tr>
       <td class="bskactions">
        <img src="%(imgurl)s/iconpen.gif" alt="%(img_alt_header3)s" />
       </td>
       <td class="bsktitle">
        <b>%(header3)s</b><br />
       </td>
      </tr>
     </thead>
     <tfoot>
      <tr><td colspan="2"></td></tr>
     </tfoot>
     <tbody>
      <tr>
       <td colspan="2">
        <table>
         <tr>
          <td colspan="2" style="padding: 0 5 10 5;">%(invite_text)s</td>
         </tr>
        </table>
       </td>
      </tr>
     </tbody>
    </table>
   </td>
  </tr>
  <tr>
   <td>
    <input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
   </td>
  </tr>
 </table>
</form>
 """
        # "Current members" cell: a select menu plus a remove button, or a
        # plain "no members" message.
        if members :
            member_list = self.__create_select_menu("member_id", members, _("Please select:"))
            member_text = """
            <td style="padding: 0 5 10 5;">%s</td>
            <td style="padding: 0 5 10 5;">
            <input type="submit" name="remove_member" value="%s" class="nonsubmitbutton"/>
            </td>""" % (member_list,_("Remove member"))
        else :
            member_text = """<td style="padding: 0 5 10 5;" colspan="2">%s</td>""" % _("No members.")
        # "Pending members" cell: select menu plus accept/reject buttons,
        # or a placeholder message.
        if pending_members :
            pending_list = self.__create_select_menu("pending_member_id", pending_members, _("Please select:"))
            pending_text = """
            <td style="padding: 0 5 10 5;">%s</td>
            <td style="padding: 0 5 10 5;">
            <input type="submit" name="add_member" value="%s" class="nonsubmitbutton"/>
            </td>
            <td style="padding: 0 5 10 5;">
            <input type="submit" name="reject_member" value="%s" class="nonsubmitbutton"/>
            </td>""" % (pending_list,_("Accept member"), _("Reject member"))
        else :
            pending_text = """<td style="padding: 0 5 10 5;" colspan="2">%s</td>""" % _("No members awaiting approval.")
        header1 = self.tmpl_group_table_title(text=_("Current members"))
        header2 = self.tmpl_group_table_title(text=_("Members awaiting approval"))
        header3 = _("Invite new members")
        # Pre-filled "write a message" URL used for inviting new members.
        # NOTE(review): the '%' formatting of msg_subject happens *inside*
        # _(), so the already-formatted string (not the msgid) is looked up
        # for translation -- this defeats the translation catalogue and is
        # likely a bug (compare msg_body, which formats outside _()).
        write_a_message_url = create_url(
            "%s/yourmessages/write" % CFG_SITE_URL,
            {
                'ln' : ln,
                'msg_subject' : _('Invitation to join "%s" group' % escape_html(group_name)),
                'msg_body' : _("""\
Hello:
I think you might be interested in joining the group "%(x_name)s".
You can join by clicking here: %(x_url)s.
Best regards.
""") % {'x_name': group_name,
        'x_url': create_html_link("%s/yourgroups/join" % CFG_SITE_URL, { 'grpID' : grpID,
                                                                        'join_button' : "1",
                                                                        },
                                  link_label=group_name, escape_urlargd=True, escape_linkattrd=True)}})
        link_open = '<a href="%s">' % escape_html(write_a_message_url)
        invite_text = _("If you want to invite new members to join your group, please use the %(x_url_open)sweb message%(x_url_close)s system.") % \
                      {'x_url_open': link_open,
                       'x_url_close': '</a>'}
        action = CFG_SITE_URL + '/yourgroups/members?ln=' + ln
        out %= {'title':_('Group: %s') % escape_html(group_name),
                'member_text' : member_text,
                'pending_text' :pending_text,
                'action':action,
                'grpID':grpID,
                'header1': header1,
                'header2': header2,
                'header3': header3,
                'img_alt_header1': _("Current members"),
                'img_alt_header2': _("Members awaiting approval"),
                'img_alt_header3': _("Invite new members"),
                'invite_text': invite_text,
                'imgurl': CFG_SITE_URL + '/img',
                'cancel_label':_("Cancel"),
                'ln':ln
                }
        return out
def tmpl_display_input_leave_group(self,
groups,
warnings=[],
ln=CFG_SITE_LANG):
"""Display groups the user can leave.
Parameters:
- 'ln' *string* - The language to display the interface in
- 'groups' *list* - List of groups the user is currently member of
- 'warnings' *list* - Display warning if no group is selected
"""
_ = gettext_set_language(ln)
out = self.tmpl_warning(warnings)
out += """
<form name="leave" action="%(action)s" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<div style="padding:10px;">
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bskactions">
<img src="%(logo)s" alt="%(label)s" />
</td>
<td class="bsktitle">
<b>%(label)s</b><br />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="2"></td></tr>
</tfoot>
<tbody>
<tr>
<td colspan="2">
<table>
<tr>
<td>%(list_label)s</td>
<td>
%(groups)s
</td>
<td>
</td>
</tr>
</table>
</td>
</tr>
</tbody>
</table>
<table>
<tr>
<td>
%(submit)s
</td>
<td>
<input type="submit" value="%(cancel_label)s" class="formbutton" name="cancel" />
</td>
</tr>
</table>
</div>
</form>
"""
if groups:
groups = self.__create_select_menu("grpID", groups, _("Please select:"))
list_label = _("Group list")
submit = """<input type="submit" name="leave_button" value="%s" class="formbutton"/>""" % _("Leave group")
else :
groups = _("You are not member of any group.")
list_label = ""
submit = ""
action = CFG_SITE_URL + '/yourgroups/leave?ln=%s'
action %= (ln)
out %= {'groups' : groups,
'list_label' : list_label,
'action':action,
'logo': CFG_SITE_URL + '/img/webbasket_create.png',
'label' : _("Leave group"),
'cancel_label':_("Cancel"),
'ln' :ln,
'submit' : submit
}
return out
def tmpl_confirm_delete(self, grpID, ln=CFG_SITE_LANG):
"""
display a confirm message when deleting a group
@param grpID *int* - ID of the group
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
action = CFG_SITE_URL + '/yourgroups/edit'
out = """
<form name="delete_group" action="%(action)s" method="post">
<table class="confirmoperation">
<tr>
<td colspan="2" class="confirmmessage">
%(message)s
</td>
</tr>
<tr>
<td>
<input type="hidden" name="confirmed" value="1" />
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" name="delete" value="%(yes_label)s" class="formbutton" />
</td>
<td>
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" value="%(no_label)s" class="formbutton" />
</td>
</tr>
</table>
</form>"""% {'message': _("Are you sure you want to delete this group?"),
'ln':ln,
'yes_label': _("Yes"),
'no_label': _("No"),
'grpID':grpID,
'action': action
}
return out
def tmpl_confirm_leave(self, uid, grpID, ln=CFG_SITE_LANG):
"""
display a confirm message
@param grpID *int* - ID of the group
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
action = CFG_SITE_URL + '/yourgroups/leave'
out = """
<form name="leave_group" action="%(action)s" method="post">
<table class="confirmoperation">
<tr>
<td colspan="2" class="confirmmessage">
%(message)s
</td>
</tr>
<tr>
<td>
<input type="hidden" name="confirmed" value="1" />
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" name="leave_button" value="%(yes_label)s" class="formbutton" />
</td>
<td>
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="grpID" value="%(grpID)s" />
<input type="submit" value="%(no_label)s" class="formbutton" />
</td>
</tr>
</table>
</form>"""% {'message': _("Are you sure you want to leave this group?"),
'ln':ln,
'yes_label': _("Yes"),
'no_label': _("No"),
'grpID':grpID,
'action': action
}
return out
def __create_join_policy_selection_menu(self, name, current_join_policy, ln=CFG_SITE_LANG):
"""Private function. create a drop down menu for selection of join policy
@param current_join_policy: join policy as defined in CFG_WEBSESSION_GROUP_JOIN_POLICY
@param ln: language
"""
_ = gettext_set_language(ln)
elements = [(CFG_WEBSESSION_GROUP_JOIN_POLICY['VISIBLEOPEN'],
_("Visible and open for new members")),
(CFG_WEBSESSION_GROUP_JOIN_POLICY['VISIBLEMAIL'],
_("Visible but new members need approval"))
]
select_text = _("Please select:")
return self.__create_select_menu(name, elements, select_text, selected_key=current_join_policy)
def __create_select_menu(self, name, elements, select_text, multiple=0, selected_key=None):
""" private function, returns a popup menu
@param name: name of HTML control
@param elements: list of (key, value)
"""
if multiple :
out = """
<select name="%s" multiple="multiple" style="width:100%%">"""% (name)
else :
out = """<select name="%s" style="width:100%%">""" % name
out += '<option value="-1">%s</option>' % (select_text)
for (key, label) in elements:
selected = ''
if key == selected_key:
selected = ' selected="selected"'
out += '<option value="%s"%s>%s</option>'% (key, selected, label)
out += '</select>'
return out
def tmpl_infobox(self, infos, ln=CFG_SITE_LANG):
"""Display len(infos) information fields
@param infos: list of strings
@param ln=language
@return: html output
"""
_ = gettext_set_language(ln)
if not((type(infos) is list) or (type(infos) is tuple)):
infos = [infos]
infobox = ""
for info in infos:
infobox += '<div><span class="info">'
lines = info.split("\n")
for line in lines[0:-1]:
infobox += line + "<br />\n"
infobox += lines[-1] + "</span></div>\n"
return infobox
def tmpl_navtrail(self, ln=CFG_SITE_LANG, title=""):
"""
display the navtrail, e.g.:
Your account > Your group > title
@param title: the last part of the navtrail. Is not a link
@param ln: language
return html formatted navtrail
"""
_ = gettext_set_language(ln)
nav_h1 = '<a class="navtrail" href="%s/youraccount/display">%s</a>'
nav_h2 = ""
if (title != ""):
nav_h2 = ' > <a class="navtrail" href="%s/yourgroups/display">%s</a>'
nav_h2 = nav_h2 % (CFG_SITE_URL, _("Your Groups"))
return nav_h1 % (CFG_SITE_URL, _("Your Account")) + nav_h2
def tmpl_group_table_title(self, img="", text="", ln=CFG_SITE_LANG):
"""
display the title of a table:
- 'img' *string* - img path
- 'text' *string* - title
- 'ln' *string* - The language to display the interface in
"""
out = "<div>"
if img:
out += """
<img src="%s" alt="" />
""" % (CFG_SITE_URL + img)
out += """
<b>%s</b>
</div>""" % text
return out
def tmpl_admin_msg(self, group_name, grpID, ln=CFG_SITE_LANG):
"""
return message content for joining group
- 'group_name' *string* - name of the group
- 'grpID' *int* - ID of the group
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
subject = _("Group %s: New membership request") % group_name
url = CFG_SITE_URL + "/yourgroups/members?grpID=%s&ln=%s"
url %= (grpID, ln)
# FIXME: which user? We should show his nickname.
body = (_("A user wants to join the group %s.") % group_name) + '<br />'
body += _("Please %(x_url_open)saccept or reject%(x_url_close)s this user's request.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_member_msg(self,
group_name,
accepted=0,
ln=CFG_SITE_LANG):
"""
return message content when new member is accepted/rejected
- 'group_name' *string* - name of the group
- 'accepted' *int* - 1 if new membership has been accepted, 0 if it has been rejected
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
if accepted:
subject = _("Group %s: Join request has been accepted") % (group_name)
body = _("Your request for joining group %s has been accepted.") % (group_name)
else:
subject = _("Group %s: Join request has been rejected") % (group_name)
body = _("Your request for joining group %s has been rejected.") % (group_name)
url = CFG_SITE_URL + "/yourgroups/display?ln=" + ln
body += '<br />'
body += _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_delete_msg(self,
group_name,
ln=CFG_SITE_LANG):
"""
return message content when new member is accepted/rejected
- 'group_name' *string* - name of the group
- 'ln' *string* - The language to display the interface in
"""
_ = gettext_set_language(ln)
subject = _("Group %s has been deleted") % group_name
url = CFG_SITE_URL + "/yourgroups/display?ln=" + ln
body = _("Group %s has been deleted by its administrator.") % group_name
body += '<br />'
body += _("You can consult the list of %(x_url_open)syour groups%(x_url_close)s.") % {'x_url_open': '<a href="' + url + '">',
'x_url_close': '</a>'}
body += '<br />'
return subject, body
def tmpl_group_info(self, nb_admin_groups=0, nb_member_groups=0, nb_total_groups=0, ln=CFG_SITE_LANG):
"""
display infos about groups (used by myaccount.py)
@param nb_admin_group: number of groups the user is admin of
@param nb_member_group: number of groups the user is member of
@param total_group: number of groups the user belongs to
@param ln: language
return: html output.
"""
_ = gettext_set_language(ln)
out = _("You can consult the list of %(x_url_open)s%(x_nb_total)i groups%(x_url_close)s you are subscribed to (%(x_nb_member)i) or administering (%(x_nb_admin)i).")
out %= {'x_url_open': '<a href="' + CFG_SITE_URL + '/yourgroups/display?ln=' + ln + '">',
'x_nb_total': nb_total_groups,
'x_url_close': '</a>',
'x_nb_admin': nb_admin_groups,
'x_nb_member': nb_member_groups}
return out
def tmpl_general_warnings(self, warning_list, ln=CFG_SITE_LANG):
"""
display information to the admin user about possible
ssecurity problems in the system.
"""
message = ""
_ = gettext_set_language(ln)
#Try and connect to the mysql database with the default invenio password
if "warning_mysql_password_equal_to_invenio_password" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The password set for MySQL root user is the same as the default Invenio password. For security purposes, you may want to change the password.")
message += "</font></p>"
#Try and connect to the invenio database with the default invenio password
if "warning_invenio_password_equal_to_default" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The password set for the Invenio MySQL user is the same as the shipped default. For security purposes, you may want to change the password.")
message += "</font></p>"
#Check if the admin password is empty
if "warning_empty_admin_password" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The password set for the Invenio admin user is currently empty. For security purposes, it is strongly recommended that you add a password.")
message += "</font></p>"
#Check if the admin email has been changed from the default
if "warning_site_support_email_equal_to_default" in warning_list:
message += "<p><font color=red>"
message += _("Warning: The email address set for support email is currently set to info@invenio-software.org. It is recommended that you change this to your own address.")
message += "</font></p>"
#Check for a new release
if "note_new_release_available" in warning_list:
message += "<p><font color=red>"
message += _("A newer version of Invenio is available for download. You may want to visit ")
message += "<a href=\"http://invenio-software.org/wiki/Installation/Download\">http://invenio-software.org/wiki/Installation/Download</a>"
message += "</font></p>"
#Error downloading release notes
if "error_cannot_download_release_notes" in warning_list:
message += "<p><font color=red>"
message += _("Cannot download or parse release notes from http://invenio-software.org/repo/invenio/tree/RELEASE-NOTES")
message += "</font></p>"
if "email_auto_generated" in warning_list:
message += "<p><font color=red>"
message += _("Your e-mail is auto-generated by the system. Please change your e-mail from <a href='%s/youraccount/edit?ln=%s'>account settings</a>.") % (CFG_SITE_SECURE_URL, ln)
message += "</font></p>"
return message
    def tmpl_external_login_button(self, provider, referer = '', icon_size = 48,
                                   classes = ""):
        """
        Template of the login button for providers which don't need username.
        @param provider: The name of the provider
        @type provider: str
        @param referer: The referer URL - will be redirected upon after login
        @type referer: str
        @param icon_size: The size of the icon of the provider
        @type icon_size: int
        @param classes: Additional classes for the login form
        @type classes: str
        @rtype: str
        """
        # Route to the endpoint matching the provider's protocol
        # (openid / oauth2 / oauth1); an unknown provider leaves the bare
        # "/youraccount/?" URL.
        login_url = CFG_SITE_SECURE_URL + "/youraccount/"
        if provider in CFG_OPENID_PROVIDERS:
            login_url += 'openid'
        elif provider in CFG_OAUTH2_PROVIDERS:
            login_url += 'oauth2'
        elif provider in CFG_OAUTH1_PROVIDERS:
            login_url += 'oauth1'
        login_url += '?'
        # Carry the referer along so the user comes back where they started,
        # unless they arrived from the login page itself.
        if referer:
            if not 'youraccount/login' in referer:
                login_url += "referer=" + referer + "&"
        out = ""
        out += """
    <div class="login_button %(class)s" id="%(provider)s_login_button">
        <div class="provider_img" id="%(provider)s_img">
            <a class="openid_url" id="%(provider)s_login" href="%(loginurl)s\
provider=%(provider)s">
                <img class="external_provider %(class)s" src="%(imgurl)s/\
%(provider)s_icon_%(icon_size)s.png"></img>
            </a>
        </div>
    </div>""" % {
            'loginurl': login_url,
            'imgurl': CFG_SITE_SECURE_URL + "/img",
            'provider': provider,
            'class': classes,
            'icon_size': icon_size
        }
        return out
    def tmpl_external_login_form(self, provider, referer = '', icon_size = 48,
                                 classes = "", label = "%(provider)s username"):
        """
        Template of the login form for providers which need an username for
        verification.
        @param provider: The name of the provider
        @type provider: str
        @param referer: The referer URL - will be redirected upon after login
        @type referer: str
        @param icon_size: The size of the icon of the provider
        @type icon_size: int
        @param classes: Additional classes for the login form
        @type classes: str
        @param label: The label for text input.
        @param label: str
        @rtype: str
        """
        # Route to the endpoint matching the provider's protocol
        # (openid / oauth2 / oauth1).
        login_url = CFG_SITE_SECURE_URL + "/youraccount/"
        if provider in CFG_OPENID_PROVIDERS:
            login_url += 'openid'
        elif provider in CFG_OAUTH2_PROVIDERS:
            login_url += 'oauth2'
        elif provider in CFG_OAUTH1_PROVIDERS:
            login_url += 'oauth1'
        # Substitute the provider name into the text-input label.
        label %= {'provider': provider}
        out = ""
        # The username box starts hidden; clicking the provider icon reveals
        # it via show_username_form() (defined in tmpl_external_login_panel).
        out += """
    <div class="login_button %(class)s login_form" id="%(provider)s_verify_form">
        <div class="provider_img with_login_form" id="%(provider)s_login_img" \
onclick="show_username_form(this)">
            <img class="external_provider %(class)s" src="%(imgurl)s/\
%(provider)s_icon_%(icon_size)s.png"></img>
        </div>
        <div class="login_content with_label" id="%(provider)s_verifier" hidden=\
"hidden">
            <form method="get" accept-charset="UTF-8" action="%(loginurl)s">
                <input type="hidden" name="provider" value="%(provider)s">
                <input type="hidden" name="referer" value="%(referer)s">
                <label class="openid_label" for="%(provider)s">%(label)s:</label>
                </br>
                <input class="openid_input" id="%(provider)s_username_field" \
type="text" name="identifier" value="" >
                <input type="submit" value=" Login ">
            </form>
        </div>
    </div>
    """ % {
            'loginurl': login_url,
            'imgurl': CFG_SITE_SECURE_URL + "/img",
            'provider': provider,
            'label': label,
            'referer': referer,
            'class': classes,
            'icon_size': icon_size
        }
        return out
    def tmpl_external_login_panel(self, ln, referer):
        """
        Template for external login buttons.

        Renders the activated OpenID/OAuth providers as large and small
        buttons, plus the JS hook that reveals a provider's username form.
        @param ln: language
        @param referer: URL to return to after login
        @rtype: str
        """
        # Imported locally, presumably to avoid a module-level import cycle
        # — TODO confirm.
        from invenio.websession_config import CFG_EXTERNAL_LOGIN_LARGE
        from invenio.websession_config import CFG_EXTERNAL_LOGIN_BUTTON_ORDER
        from invenio.websession_config import CFG_EXTERNAL_LOGIN_FORM_LABELS
        from invenio.access_control_config import CFG_OPENID_CONFIGURATIONS
        def construct_button(provider, size, button_class):
            """
            Constructs a button for given provider.
            @param provider: the name of the provider.
            @type provider: str
            @param size: the size of the login button
            @type size: int
            @param button_class: the additional class for the login button
            @type button_class: str
            @rtype str
            """
            _ = gettext_set_language(ln)
            # Look if the login button needs a form: a "{0}" placeholder in
            # the provider's identifier template means a username is required.
            config = CFG_OPENID_CONFIGURATIONS.get(provider, {})
            identifier = config.get('identifier', '')
            if "{0}" in identifier:
                label = CFG_EXTERNAL_LOGIN_FORM_LABELS.get(provider,
                                                           "%(provider)s username")
                return self.tmpl_external_login_form(provider,
                                                     referer = referer,
                                                     icon_size = size,
                                                     classes = button_class,
                                                     label = _(label))
            else:
                return self.tmpl_external_login_button(provider,
                                                       referer = referer,
                                                       icon_size = size,
                                                       classes = button_class)
        # Multiplying each provider list by its CFG_*_AUTHENTICATION flag
        # yields either [] or the full list, so only enabled protocols
        # contribute (assumes the flags are 0/1 — verify in config).
        activated_providers = CFG_OPENID_PROVIDERS * CFG_OPENID_AUTHENTICATION \
            + CFG_OAUTH1_PROVIDERS * CFG_OAUTH1_AUTHENTICATION \
            + CFG_OAUTH2_PROVIDERS * CFG_OAUTH2_AUTHENTICATION
        if not len(activated_providers):
            return ""
        out = ""
        out += "<div id='buttons'>"
        out += "<strong>You may login with:</strong>"
        out += "<div id='big_buttons'>"
        # Large buttons: configured favourites, in CFG_EXTERNAL_LOGIN_LARGE order.
        for provider in CFG_EXTERNAL_LOGIN_LARGE:
            if provider in activated_providers:
                out += construct_button(provider, 48, "login_button_big")
        out += "</div>"
        out += "<div id='small_buttons'>"
        providers = CFG_EXTERNAL_LOGIN_BUTTON_ORDER
        if (len(activated_providers) - len(CFG_EXTERNAL_LOGIN_LARGE)) != \
                len(providers):
            # Not all the providers ordered. Add the unsorted ones to the end.
            for provider in sorted(activated_providers):
                if not provider in providers:
                    providers.append(provider)
        # Small buttons: everything active that is not already shown large.
        for provider in providers:
            if not provider in CFG_EXTERNAL_LOGIN_LARGE:
                out += construct_button(provider, 24, "login_button_small")
        out += "</div>"
        out += "<div id='form_field'>"
        out += "</div>"
        out += "</div>"
        # Clicking a form-style provider icon copies its hidden username form
        # into the shared #form_field container.
        out += """
        <script type="text/javascript">
            function show_username_form(element) {
                form_field = document.getElementById('form_field');
                form_field.innerHTML = element.nextSibling.nextSibling.innerHTML;
            }
        </script>"""
        return out
|
jmartinm/invenio
|
modules/websession/lib/websession_templates.py
|
Python
|
gpl-2.0
| 119,669
|
[
"VisIt"
] |
689a7d24b07911a1005b4ec6f75f55cefc5333284d5852046bdf98a4defa3cb6
|
#!/usr/bin/env python
# Regression test: cut a PLOT3D structured grid with a plane and probe the
# scalar field onto the slice, rendered against a wireframe context plane
# and the dataset outline.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Rendering infrastructure: renderer, window and interactor.
renderer = vtk.vtkRenderer()
render_window = vtk.vtkRenderWindow()
render_window.SetMultiSamples(0)
render_window.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(render_window)
# Read the PLOT3D dataset and take its first block.
reader = vtk.vtkMultiBlockPLOT3DReader()
reader.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
reader.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
reader.SetScalarFunctionNumber(100)
reader.SetVectorFunctionNumber(202)
reader.Update()
grid = reader.GetOutput().GetBlock(0)
# Cut the grid with a tilted plane through its center, then probe the
# original data back onto the resulting polygonal slice.
cut_plane = vtk.vtkPlane()
cut_plane.SetOrigin(grid.GetCenter())
cut_plane.SetNormal(-0.287, 0, 0.9579)
cutter = vtk.vtkCutter()
cutter.SetInputData(grid)
cutter.SetCutFunction(cut_plane)
prober = vtk.vtkProbeFilter()
prober.SetInputConnection(cutter.GetOutputPort())
prober.SetSourceData(grid)
cut_mapper = vtk.vtkDataSetMapper()
cut_mapper.SetInputConnection(prober.GetOutputPort())
cut_mapper.SetScalarRange(grid.GetPointData().GetScalars().GetRange())
cut_actor = vtk.vtkActor()
cut_actor.SetMapper(cut_mapper)
# Extract one computational plane as a black wireframe for context.
comp_plane = vtk.vtkStructuredGridGeometryFilter()
comp_plane.SetInputData(grid)
comp_plane.SetExtent(0, 100, 0, 100, 9, 9)
plane_mapper = vtk.vtkPolyDataMapper()
plane_mapper.SetInputConnection(comp_plane.GetOutputPort())
plane_mapper.ScalarVisibilityOff()
plane_actor = vtk.vtkActor()
plane_actor.SetMapper(plane_mapper)
plane_actor.GetProperty().SetRepresentationToWireframe()
plane_actor.GetProperty().SetColor(0, 0, 0)
# Outline of the whole dataset.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(grid)
outline_mapper = vtk.vtkPolyDataMapper()
outline_mapper.SetInputConnection(outline.GetOutputPort())
outline_actor = vtk.vtkActor()
outline_actor.SetMapper(outline_mapper)
outline_actor.GetProperty().SetColor(0, 0, 0)
# Compose the scene, position the camera and render once.
renderer.AddActor(outline_actor)
renderer.AddActor(plane_actor)
renderer.AddActor(cut_actor)
renderer.SetBackground(1, 1, 1)
render_window.SetSize(400, 300)
camera = renderer.GetActiveCamera()
camera.SetClippingRange(11.1034, 59.5328)
camera.SetFocalPoint(9.71821, 0.458166, 29.3999)
camera.SetPosition(-2.95748, -26.7271, 44.5309)
camera.SetViewUp(0.0184785, 0.479657, 0.877262)
interactor.Initialize()
render_window.Render()
#interactor.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Core/Testing/Python/probe.py
|
Python
|
bsd-3-clause
| 2,456
|
[
"VTK"
] |
4d21e5607697003c91a74cfd631cb141d469d77c798ddda0d7b54cb4d4d4c8a8
|
import doctest
import inspect
import os
import re
from datetime import datetime
from types import SimpleNamespace
from unittest.mock import Mock, patch
from django.test import SimpleTestCase, TestCase
import attr
from dateutil.tz import tzoffset, tzutc
from lxml import etree
from nose.tools import assert_equal, assert_is_none, assert_raises
import corehq.motech.openmrs.atom_feed
from corehq.motech.openmrs.atom_feed import (
get_case_block_kwargs_from_bahmni_diagnoses,
get_case_block_kwargs_from_observations,
get_diagnosis_mappings,
get_encounter_uuid,
get_feed_updates,
get_feed_xml,
get_observation_mappings,
get_patient_uuid,
get_timestamp,
import_encounter,
)
from corehq.motech.openmrs.const import ATOM_FEED_NAME_PATIENT
from corehq.motech.openmrs.exceptions import (
OpenmrsException,
OpenmrsFeedRuntimeException,
OpenmrsFeedSyntaxError,
)
from corehq.motech.openmrs.repeaters import AtomFeedStatus, OpenmrsRepeater
from corehq.motech.requests import Requests
from corehq.util.test_utils import TestFileMixin
@attr.s
class CaseMock:
    """Minimal stand-in for a CommCare case, exposing only the attributes
    the atom-feed import code reads."""
    case_id = attr.ib()   # case identifier
    name = attr.ib()      # case name
    type = attr.ib()      # case type, e.g. 'patient'
    owner_id = attr.ib()  # owning user/location id
class GetTimestampTests(SimpleTestCase):
    """Tests for get_timestamp(): extracting <updated> datetimes from a feed."""
    def setUp(self):
        self.feed_xml = inspect.cleandoc("""<?xml version="1.0" encoding="UTF-8"?>
            <feed xmlns="http://www.w3.org/2005/Atom">
            <title>Patient AOP</title>
            <updated>2018-05-15T14:02:08Z</updated>
            <entry>
            <title>Patient</title>
            <updated>2018-04-26T10:56:10Z</updated>
            </entry>
            </feed>""")
    def test_no_node(self):
        # With every <updated> element stripped, lookup must fail loudly.
        stripped = re.sub(r'<updated.*</updated>', '', self.feed_xml)
        root = etree.XML(stripped.encode('utf-8'))
        with self.assertRaisesRegex(ValueError, r'^XPath "./atom:updated" not found$'):
            get_timestamp(root)
    def test_xpath(self):
        root = etree.XML(self.feed_xml.encode('utf-8'))
        # "*[local-name()='foo']" ignores namespaces and matches all nodes with tag "foo":
        timestamp = get_timestamp(root, "./*[local-name()='entry']/*[local-name()='updated']")
        self.assertEqual(timestamp, datetime(2018, 4, 26, 10, 56, 10, tzinfo=tzutc()))
    def test_bad_date(self):
        # An unparseable date string raises ValueError.
        mangled = self.feed_xml.replace('2018-05-15T14:02:08Z', 'Nevermore')
        root = etree.XML(mangled.encode('utf-8'))
        with self.assertRaisesRegex(ValueError, r'Unknown string format'):
            get_timestamp(root)
    def test_timezone(self):
        # An explicit UTC offset must be preserved in the result.
        shifted = self.feed_xml.replace('2018-05-15T14:02:08Z', '2018-05-15T14:02:08+0500')
        root = etree.XML(shifted.encode('utf-8'))
        timestamp = get_timestamp(root)
        self.assertEqual(timestamp, datetime(2018, 5, 15, 14, 2, 8, tzinfo=tzoffset(None, 5 * 60 * 60)))
class GetPatientUuidTests(SimpleTestCase):
    """Tests for get_patient_uuid(): pulling the UUID from an entry's CDATA."""
    def setUp(self):
        self.feed_xml = inspect.cleandoc("""<?xml version="1.0" encoding="UTF-8"?>
            <feed xmlns="http://www.w3.org/2005/Atom">
            <title>Patient AOP</title>
            <entry>
            <title>Patient</title>
            <content type="application/vnd.atomfeed+xml">
            <![CDATA[/openmrs/ws/rest/v1/patient/e8aa08f6-86cd-42f9-8924-1b3ea021aeb4?v=full]]>
            </content>
            </entry>
            </feed>""")
    def _entry_elem(self, xml):
        """Parse *xml* and return its first atom <entry> element."""
        feed_elem = etree.XML(xml.encode('utf-8'))
        return next(e for e in feed_elem if e.tag.endswith('entry'))
    def test_no_content_node(self):
        # Without a <content> node there is nowhere to read a UUID from.
        xml = re.sub(r'<content.*</content>', '', self.feed_xml, flags=re.DOTALL)
        with self.assertRaisesRegex(ValueError, r'^Patient UUID not found$'):
            get_patient_uuid(self._entry_elem(xml))
    def test_bad_cdata(self):
        # A CDATA payload without a UUID-shaped segment is rejected.
        xml = self.feed_xml.replace('e8aa08f6-86cd-42f9-8924-1b3ea021aeb4', 'mary-mallon')
        with self.assertRaisesRegex(ValueError, r'^Patient UUID not found$'):
            get_patient_uuid(self._entry_elem(xml))
    def test_success(self):
        entry_elem = self._entry_elem(self.feed_xml)
        self.assertEqual(get_patient_uuid(entry_elem), 'e8aa08f6-86cd-42f9-8924-1b3ea021aeb4')
class GetEncounterUuidTests(SimpleTestCase):
    """Tests for get_encounter_uuid() dispatch on the entry <title>."""
    def test_bed_assignment(self):
        # Bed assignments carry no importable encounter: expect None.
        entry_elem = etree.XML("""<entry>
            <title>Bed-Assignment</title>
            <content type="application/vnd.atomfeed+xml">
            <![CDATA[/openmrs/ws/rest/v1/bedPatientAssignment/fed0d6f9-e76c-4a8e-a10d-c8e98c7d258f?v=custom:(uuid,startDatetime,endDatetime,bed,patient,encounter:(uuid,encounterDatetime,encounterType:(uuid,name),visit:(uuid,startDatetime,visitType)))]]>
            </content>
            </entry>""")
        self.assertIsNone(get_encounter_uuid(entry_elem))
    def test_unknown_entry(self):
        # Unrecognised entry titles must raise, not be silently skipped.
        entry_elem = etree.XML("""<entry>
            <title>UnExPeCtEd</title>
            <content type="application/vnd.atomfeed+xml">
            <![CDATA[/openmrs/ws/rest/v1/UNKNOWN/0f54fe40-89af-4412-8dd4-5eaebe8684dc]]>
            </content>
            </entry>""")
        with self.assertRaises(ValueError):
            get_encounter_uuid(entry_elem)
class ImportEncounterTest(TestCase, TestFileMixin):
    """End-to-end tests for import_encounter() and the case-block helpers.

    Encounter fixtures ("encounter", "encounter_with_diagnoses") are JSON
    files loaded from the sibling "data" directory via TestFileMixin.
    """
    file_path = ('data',)
    root = os.path.dirname(__file__)
    def setUp(self):
        # The CommCare case that imported encounters should update.
        self.case = CaseMock(
            case_id='abcdef',
            name='Randall',
            type='patient',
            owner_id='123456'
        )
    def tearDown(self):
        self.repeater.connection_settings.delete()
        self.repeater.delete()
    def setUpRepeater(self):
        """Configure a repeater whose mappings write plain case properties."""
        # Map the height observation concept to the "height" case property.
        observations = [
            {
                "doc_type": "ObservationMapping",
                "concept": "5090AAAAAAAAAAAAAAAAAAAAAAAAAAAA",
                "value": {
                    "doc_type": "FormQuestion",
                    "form_question": "/data/height"
                },
                "case_property": "height"
            }
        ]
        # Three mappings for the same diagnosis concept, exercising
        # FormQuestionMap, JsonPathCasePropertyMap and JsonPathCaseProperty.
        diagnoses = [
            {
                "doc_type": "ObservationMapping",
                "concept": "f7e8da66-f9a7-4463-a8ca-99d8aeec17a0",
                "value": {
                    "doc_type": "FormQuestionMap",
                    "direction": "in",
                    "form_question": "[unused when direction == 'in']",
                    "value_map": {
                        "emergency_room_user_id": "Hypothermia",  # Value must match diagnosis name
                    }
                },
                "case_property": "owner_id"
            },
            {
                "doc_type": "ObservationMapping",
                "concept": "f7e8da66-f9a7-4463-a8ca-99d8aeec17a0",
                "value": {
                    "doc_type": "JsonPathCasePropertyMap",
                    "direction": "in",
                    "jsonpath": "codedAnswer.name",
                    "case_property": "[unused when direction == 'in']",
                    "value_map": {
                        "yes": "Hypothermia"
                    }
                },
                "case_property": "hypothermia_diagnosis"
            },
            {
                "doc_type": "ObservationMapping",
                "concept": "f7e8da66-f9a7-4463-a8ca-99d8aeec17a0",
                "value": {
                    "doc_type": "JsonPathCaseProperty",
                    "direction": "in",
                    "jsonpath": "diagnosisDateTime",
                    "case_property": "[unused when direction == 'in']",
                    "commcare_data_type": "cc_date",
                    "external_data_type": "omrs_datetime"
                },
                "case_property": "hypothermia_date"
            }
        ]
        self.repeater = OpenmrsRepeater.wrap(self.get_repeater_dict(observations, diagnoses))
    def setUpRepeaterForExtCase(self):
        """Configure a repeater whose mappings create extension cases."""
        # Height observations spawn an "observation" extension case.
        observations = [
            {
                "doc_type": "ObservationMapping",
                "concept": "5090AAAAAAAAAAAAAAAAAAAAAAAAAAAA",
                "value": {
                    "doc_type": "FormQuestion",
                    "form_question": "/data/height"
                },
                "indexed_case_mapping": {
                    "identifier": "parent",
                    "case_type": "observation",
                    "relationship": "extension",
                    "case_properties": [
                        {
                            "doc_type": "JsonPathCaseProperty",
                            "jsonpath": "concept.name",
                            "case_property": "case_name",
                        },
                        {
                            "doc_type": "JsonPathCaseProperty",
                            "jsonpath": "value",
                            "case_property": "observation_value",
                        }
                    ]
                }
            }
        ]
        diagnoses = [
            {
                "doc_type": "ObservationMapping",
                "concept": "f7e8da66-f9a7-4463-a8ca-99d8aeec17a0",
                "value": {
                    "doc_type": "FormQuestionMap",
                    "form_question": "/data/bahmni_hypothermia",
                    "value_map": {
                        "emergency_room_user_id": "Hypothermia",  # Value must match diagnosis name
                    },
                    "direction": "in",
                },
            },
            {
                "doc_type": "ObservationMapping",
                "concept": "all",  # Import all diagnoses as extension cases
                "value": {
                    "direction": "in",
                    "value": "[unused when direction='in' and ObservationMapping.case_property not set]",
                },
                "indexed_case_mapping": {
                    "identifier": "parent",
                    "case_type": "diagnosis",
                    "relationship": "extension",
                    "case_properties": [
                        {
                            "jsonpath": "codedAnswer.name",
                            "case_property": "case_name",
                        },
                        {
                            "jsonpath": "certainty",
                            "case_property": "certainty",
                        },
                        {
                            "jsonpath": "order",
                            "case_property": "is_primary",
                            "value_map": {
                                "yes": "PRIMARY",
                                "no": "SECONDARY"
                            }
                        },
                        {
                            "jsonpath": "diagnosisDateTime",
                            "case_property": "diagnosis_date",
                            "external_data_type": "omrs_datetime",
                            "commcare_data_type": "cc_date",
                        }
                    ]
                }
            }
        ]
        self.repeater = OpenmrsRepeater.wrap(self.get_repeater_dict(observations, diagnoses))
    def get_repeater_dict(self, observations, diagnoses):
        """Return a serialised OpenmrsRepeater using the given mappings."""
        return {
            "_id": "123456",
            "domain": "test_domain",
            "url": "https://example.com/openmrs/",
            "username": "foo",
            "password": "bar",
            "white_listed_case_types": ['patient'],
            "openmrs_config": {
                "form_configs": [{
                    "doc_type": "OpenmrsFormConfig",
                    "xmlns": "http://openrosa.org/formdesigner/9481169B-0381-4B27-BA37-A46AB7B4692D",
                    "openmrs_visit_type": "c22a5000-3f10-11e4-adec-0800271c1b75",
                    "openmrs_encounter_type": "81852aee-3f10-11e4-adec-0800271c1b75",
                    "openmrs_start_datetime": {
                        "direction": "in",
                        "case_property": "last_visit_date",
                        "external_data_type": "omrs_datetime",
                        "commcare_data_type": "cc_date",
                        # "jsonpath": "encounterDateTime",  # get_encounter_datetime_value_sources() default value
                    },
                    "openmrs_observations": observations,
                    "bahmni_diagnoses": diagnoses
                }]
            }
        }
    def test_import_encounter(self):
        """
        Importing the given encounter should update the case's "height" property
        """
        response = Mock()
        response.json.return_value = self.get_json('encounter')
        self.setUpRepeater()
        # Patch out HTTP, case submission, case lookup and user lookup so the
        # import runs entirely against the fixture.
        with patch.object(Requests, 'get') as get_patch, \
                patch('corehq.motech.openmrs.atom_feed.submit_case_blocks') as submit_case_blocks_patch, \
                patch('corehq.motech.openmrs.atom_feed.importer_util') as importer_util_patch, \
                patch('corehq.motech.openmrs.repeaters.get_one_commcare_user_at_location'):
            get_patch.return_value = response
            importer_util_patch.lookup_case.return_value = (self.case, None)
            import_encounter(self.repeater, 'c719b87f-d221-493b-bec7-c212aa813f5d')
            # "»" marks a deliberate line continuation; it is stripped below
            # along with all indentation to build a one-line regex.
            case_block_re = """
                <case case_id="abcdef" »
                      date_modified="[\\d\\-T\\:\\.Z]+" »
                      xmlns="http://commcarehq.org/case/transaction/v2">
                    <update>
                        <height>105</height>
                        <last_visit_date>2018-01-18</last_visit_date>
                    </update>
                </case>"""
            case_block_re = ''.join((l.strip() for l in case_block_re.split('\n'))).replace('»', '')
            ([case_block], domain), kwargs = submit_case_blocks_patch.call_args
            self.assertRegex(case_block, case_block_re)
            self.assertEqual(domain, 'test_domain')
            self.assertEqual(kwargs['device_id'], 'openmrs-atomfeed-123456')
            self.assertEqual(kwargs['xmlns'], 'http://commcarehq.org/openmrs-integration')
    def test_get_case_block_kwargs_from_observations(self):
        """Observation mappings yield a case-property update, no new cases."""
        self.setUpRepeater()
        encounter = self.get_json('encounter')
        observations = encounter['observations']
        case_block_kwargs, case_blocks = get_case_block_kwargs_from_observations(
            observations,
            get_observation_mappings(self.repeater),
            (None, None, None)
        )
        self.assertEqual(case_block_kwargs, {'update': {'height': 105}})
        self.assertEqual(case_blocks, [])
    def test_get_case_block_kwargs_from_bahmni_diagnoses(self):
        """Diagnosis mappings yield owner_id plus property updates."""
        self.setUpRepeater()
        encounter = self.get_json('encounter_with_diagnoses')
        bahmni_diagnoses = encounter['bahmniDiagnoses']
        case_block_kwargs, case_blocks = get_case_block_kwargs_from_bahmni_diagnoses(
            bahmni_diagnoses,
            get_diagnosis_mappings(self.repeater),
            (None, None, None)
        )
        self.assertEqual(case_block_kwargs, {
            'owner_id': 'emergency_room_user_id',
            'update': {
                'hypothermia_diagnosis': 'yes',
                'hypothermia_date': '2019-10-18'
            }
        })
        self.assertEqual(case_blocks, [])
    def test_get_case_blocks_from_observations(self):
        """indexed_case_mapping creates an "observation" extension case."""
        self.setUpRepeaterForExtCase()
        encounter = self.get_json('encounter')
        observations = encounter['observations']
        case_block_kwargs, case_blocks = get_case_block_kwargs_from_observations(
            observations,
            get_observation_mappings(self.repeater),
            ('test-case-id', 'patient', 'default-owner-id')
        )
        self.assertEqual(case_block_kwargs, {'update': {}})
        self.assertEqual(len(case_blocks), 1)
        date_modified = case_blocks[0].date_modified.isoformat() + 'Z'
        date_opened = case_blocks[0].date_opened
        # "»" marks a deliberate line continuation; stripped below.
        case_block = f"""
            <case case_id="{case_blocks[0].case_id}" »
                  date_modified="{date_modified}" »
                  xmlns="http://commcarehq.org/case/transaction/v2">
                <create>
                    <case_type>observation</case_type>
                    <case_name>HEIGHT</case_name>
                    <owner_id>default-owner-id</owner_id>
                </create>
                <update>
                    <date_opened>{date_opened}</date_opened>
                    <observation_value>105</observation_value>
                </update>
                <index>
                    <parent case_type="patient" relationship="extension">test-case-id</parent>
                </index>
            </case>"""
        case_block = ''.join((l.strip() for l in case_block.split('\n'))).replace('»', '')
        self.assertEqual(case_blocks[0].as_text(), case_block)
    def test_get_case_blocks_from_bahmni_diagnoses(self):
        """The "all" diagnosis mapping creates a "diagnosis" extension case."""
        self.setUpRepeaterForExtCase()
        encounter = self.get_json('encounter_with_diagnoses')
        bahmni_diagnoses = encounter['bahmniDiagnoses']
        case_block_kwargs, case_blocks = get_case_block_kwargs_from_bahmni_diagnoses(
            bahmni_diagnoses,
            get_diagnosis_mappings(self.repeater),
            ('test-case-id', 'patient', 'default-owner-id')
        )
        self.assertEqual(case_block_kwargs, {'update': {}})
        self.assertEqual(len(case_blocks), 1)
        date_modified = case_blocks[0].date_modified.isoformat() + 'Z'
        date_opened = case_blocks[0].date_opened
        # "»" marks a deliberate line continuation; stripped below.
        case_block = f"""
            <case case_id="{case_blocks[0].case_id}" »
                  date_modified="{date_modified}" »
                  xmlns="http://commcarehq.org/case/transaction/v2">
                <create>
                    <case_type>diagnosis</case_type>
                    <case_name>Hypothermia</case_name>
                    <owner_id>default-owner-id</owner_id>
                </create>
                <update>
                    <date_opened>{date_opened}</date_opened>
                    <certainty>CONFIRMED</certainty>
                    <diagnosis_date>2019-10-18</diagnosis_date>
                    <is_primary>yes</is_primary>
                </update>
                <index>
                    <parent case_type="patient" relationship="extension">test-case-id</parent>
                </index>
            </case>"""
        case_block = ''.join((l.strip() for l in case_block.split('\n'))).replace('»', '')
        self.assertEqual(case_blocks[0].as_text(), case_block)
def test_get_feed_xml_feed_does_not_exist():
    """A 500 "feed does not exist" page raises OpenmrsFeedRuntimeException
    and notifies with the feed-reset explanation."""
    page = '1000'
    feed_url = f'https://example.com/openmrs/ws/atomfeed/patient/{page}'
    fake_response = SimpleNamespace(
        status_code=500,
        url=feed_url,
        text=(
            '<!DOCTYPE html>\n'
            '<html><body><h1>HTTP Status 500 - Request processing failed; '
            'nested exception is org.ict4h.atomfeed.server.exceptions'
            '.AtomFeedRuntimeException: feed does not exist</h1></html>'
        ),
    )
    requests = Mock(domain_name='test_domain', get=lambda url: fake_response)
    with assert_raises(OpenmrsFeedRuntimeException):
        get_feed_xml(requests, ATOM_FEED_NAME_PATIENT, page)
    requests.notify_exception.assert_called_with(
        'Domain "test_domain": Page does not exist in Atom feed '
        f'"{feed_url}". Resetting Atom feed status.',
        'This can happen if the IP address of a Repeater is changed to point '
        'to a different server, or if a server has been rebuilt. It can '
        'signal more severe consequences, like attempts to synchronize '
        'CommCare cases with OpenMRS patients that can no longer be found.'
    )
def test_get_feed_xml_feedid_zero():
    """Page "0" is rejected by OpenMRS ("feedId must ... be greater than 0");
    get_feed_xml() must raise OpenmrsFeedRuntimeException and notify."""
    page = '0'
    response_url = f'https://example.com/openmrs/ws/atomfeed/patient/{page}'
    response = SimpleNamespace(
        status_code=500,
        url=response_url,
        text=(
            '<!DOCTYPE html>\n'
            '<html><body><h1>HTTP Status 500 - Request processing failed; '
            'nested exception is org.ict4h.atomfeed.server.exceptions'
            '.AtomFeedRuntimeException: feedId must not be null and must be '
            'greater than 0</h1></html>'
        ),
    )
    requests = Mock(
        domain_name='test_domain',
        get=lambda url: response,
    )
    with assert_raises(OpenmrsFeedRuntimeException):
        # Pass `page` (was a duplicated '0' literal) so the request and the
        # expected notification below cannot drift apart; matches the
        # sibling tests' style.
        get_feed_xml(requests, ATOM_FEED_NAME_PATIENT, page)
    requests.notify_exception.assert_called_with(
        f'Domain "test_domain": Page "{page}" is not valid in Atom feed '
        f'"{response_url}". Resetting Atom feed status.',
        f'It is unclear how Atom feed pagination can lead to page "{page}". '
        'Follow up with OpenMRS system administrator.'
    )
def test_get_feed_xml_500():
    """An unrecognised 500 response raises the generic OpenmrsException."""
    page = 'recent'
    feed_url = f'https://example.com/openmrs/ws/atomfeed/patient/{page}'
    fake_response = SimpleNamespace(
        status_code=500,
        url=feed_url,
        text='<!DOCTYPE html>\n'
             '<html><body>HTTP Status 500</body></html>',
    )
    requests = Mock(domain_name='test_domain', get=lambda url: fake_response)
    with assert_raises(OpenmrsException):
        get_feed_xml(requests, ATOM_FEED_NAME_PATIENT, page)
    requests.notify_exception.assert_called_with(
        'Domain "test_domain": Unrecognized error in Atom feed '
        f'"{feed_url}".',
        'Response text: \n'
        '<!DOCTYPE html>\n<html><body>HTTP Status 500</body></html>'
    )
def test_get_feed_xml_bad_xml():
    """Malformed XML in a 200 response raises OpenmrsFeedSyntaxError."""
    page = 'recent'
    feed_url = f'https://example.com/openmrs/ws/atomfeed/patient/{page}'
    fake_response = SimpleNamespace(
        status_code=200,
        url=feed_url,
        content='<html><body>Bad XML</html>',
    )
    requests = Mock(domain_name='test_domain', get=lambda url: fake_response)
    with assert_raises(OpenmrsFeedSyntaxError):
        get_feed_xml(requests, ATOM_FEED_NAME_PATIENT, page)
    requests.notify_exception.assert_called_with(
        'Opening and ending tag mismatch: body line 1 and html, line 1, '
        'column 27 (<string>, line 1)',
        'There is an XML syntax error in the OpenMRS Atom feed at '
        f'"{feed_url}".'
    )
def test_get_feed_updates():
    """get_feed_updates() must swallow OpenmrsFeedSyntaxError and return."""
    fake_response = SimpleNamespace(
        status_code=200,
        url='https://www.example.com/openmrs/ws/atomfeed/patient/recent',
        content='<html><body>Bad XML</html>',
    )
    fake_requests = SimpleNamespace(
        domain_name='test_domain',
        get=lambda url: fake_response,
        notify_exception=lambda err, msg: None,
    )
    fake_repeater = SimpleNamespace(
        atom_feed_status={},
        requests=fake_requests,
    )
    with patch('corehq.motech.openmrs.atom_feed.get_feed_xml') \
            as get_feed_xml_mock:
        get_feed_xml_mock.side_effect = OpenmrsFeedSyntaxError
        # Assert returns without raising
        get_feed_updates(fake_repeater, ATOM_FEED_NAME_PATIENT)
def test_status_defaults():
    """A fresh AtomFeedStatus starts unpolled, pointing at the "recent" page."""
    default_status = AtomFeedStatus()
    assert_is_none(default_status.last_polled_at)
    assert_equal(default_status.last_page, 'recent')
def test_doctests():
    """Run the module's doctests; none may fail."""
    # doctest.testmod returns a TestResults(failed, attempted) named tuple.
    failure_count, _attempted = doctest.testmod(corehq.motech.openmrs.atom_feed)
    assert failure_count == 0
|
dimagi/commcare-hq
|
corehq/motech/openmrs/tests/test_atom_feed.py
|
Python
|
bsd-3-clause
| 23,450
|
[
"VisIt"
] |
91488d8ff052b5b5d6bc103b2c9657f82f13ad00ef8c234f5ce16b28b7f48670
|
# Delete the last n messages sent by the bot
# Very useful for cleaning up after testing
# Manually interacts with Slack's API through res.robot.slack_client.api_call
import itertools
import json
import logging
from espresso.main import robot
@robot.respond(r"(?i)delete your last (?P<howmany>[0-9]*) messages")
def delete_n_messages(res):
    """Delete the bot's own last N messages in the current channel."""
    count = int(res.match.group('howmany'))
    history = json.loads(res.robot.slack_client.api_call('channels.history',
                                                         channel=res.msg.channel.id,
                                                         inclusive=1))
    # Only plain messages (no subtype) authored by this bot are candidates.
    own_messages = (m for m in history['messages']
                    if (m.get('type') == 'message')
                    and ('subtype' not in m)
                    and (m['user'] == robot.user.uid))
    for message in itertools.islice(own_messages, count):
        logging.debug("deleting message with content %s", message['text'])
        res.robot.slack_client.api_call("chat.delete",
                                        ts=message['ts'],
                                        channel=res.msg.channel.id)
    logging.debug("deleted my last %i messages in the channel %s",
                  count, res.msg.channel.name)
|
ratchetrobotics/espresso
|
plugins/deletemessages.py
|
Python
|
bsd-3-clause
| 1,095
|
[
"ESPResSo"
] |
5004e5e086555a261a5096866a203ba5c06cff7dddae61b89bbd66808bd99973
|
"""
Test combination of multiple experiments and reflections files.
"""
from __future__ import annotations
import copy
import os
import procrunner
import pytest
from dxtbx.model.experiment_list import ExperimentListFactory
from dxtbx.serialize import load
import dials.command_line.combine_experiments as combine_experiments
from dials.array_family import flex
def test(dials_regression, run_in_tmpdir):
    """End-to-end test of dials.combine_experiments and dials.split_experiments.

    Combines 103 narrow-wedge sweeps that share one beam/scan/detector/
    goniometer, round-trips the result through dials.split_experiments
    (with and without experiment identifiers), and checks splitting by
    detector after modifying half of the experiments.
    """
    data_dir = os.path.join(
        dials_regression, "refinement_test_data", "multi_narrow_wedges"
    )
    input_range = list(range(2, 49))
    # These sweeps are deliberately excluded from the input set.
    for i in (8, 10, 15, 16, 34, 39, 45):
        input_range.remove(i)
    # One experiments/reflections input pair per remaining sweep.
    phil_input = "\n".join(
        (
            " input.experiments={0}/data/sweep_%03d/experiments.json\n"
            + " input.reflections={0}/data/sweep_%03d/reflections.pickle"
        )
        % (i, i)
        for i in input_range
    )
    # assert phil_input == "\n" + phil_input2 + "\n "
    # Take beam/scan/goniometer/detector models from the first experiment.
    input_phil = (
        phil_input.format(data_dir)
        + """
 reference_from_experiment.beam=0
 reference_from_experiment.scan=0
 reference_from_experiment.goniometer=0
 reference_from_experiment.detector=0
 """
    )
    with open("input.phil", "w") as phil_file:
        phil_file.writelines(input_phil)
    result = procrunner.run(["dials.combine_experiments", "input.phil"])
    assert not result.returncode and not result.stderr
    # load results
    exp = ExperimentListFactory.from_json_file("combined.expt", check_format=False)
    ref = flex.reflection_table.from_file("combined.refl")
    # test the experiments: one crystal each, but shared shared models
    assert len(exp) == 103
    assert len(exp.crystals()) == 103
    assert len(exp.beams()) == 1
    assert len(exp.scans()) == 1
    assert len(exp.detectors()) == 1
    assert len(exp.goniometers()) == 1
    for e in exp:
        assert e.imageset is not None
    # test the reflections
    assert len(ref) == 11689
    result = procrunner.run(
        ["dials.split_experiments", "combined.expt", "combined.refl"]
    )
    assert not result.returncode and not result.stderr
    # Each split output must contain exactly one experiment whose models
    # match the corresponding combined experiment, with ids reset to 0.
    for i, e in enumerate(exp):
        assert os.path.exists("split_%03d.expt" % i)
        assert os.path.exists("split_%03d.refl" % i)
        exp_single = ExperimentListFactory.from_json_file(
            "split_%03d.expt" % i, check_format=False
        )
        ref_single = flex.reflection_table.from_file("split_%03d.refl" % i)
        assert len(exp_single) == 1
        assert exp_single[0].crystal == e.crystal
        assert exp_single[0].beam == e.beam
        assert exp_single[0].detector == e.detector
        assert exp_single[0].scan == e.scan
        assert exp_single[0].goniometer == e.goniometer
        assert exp_single[0].imageset == e.imageset
        assert len(ref_single) == len(ref.select(ref["id"] == i))
        assert ref_single["id"].all_eq(0)
    # Splitting experiments only (no reflections), with a custom prefix.
    result = procrunner.run(
        ["dials.split_experiments", "combined.expt", "output.experiments_prefix=test"]
    )
    assert not result.returncode and not result.stderr
    for i in range(len(exp)):
        assert os.path.exists("test_%03d.expt" % i)
    # Modify a copy of the detector
    detector = copy.deepcopy(exp.detectors()[0])
    panel = detector[0]
    x, y, z = panel.get_origin()
    panel.set_frame(panel.get_fast_axis(), panel.get_slow_axis(), (x, y, z + 10))
    # Set half of the experiments to the new detector
    for i in range(len(exp) // 2):
        exp[i].detector = detector
    exp.as_json("modded.expt")
    result = procrunner.run(
        [
            "dials.split_experiments",
            "modded.expt",
            "combined.refl",
            "output.experiments_prefix=test_by_detector",
            "output.reflections_prefix=test_by_detector",
            "by_detector=True",
        ]
    )
    assert not result.returncode and not result.stderr
    # Two detectors -> exactly two by-detector outputs, no third.
    for i in range(2):
        assert os.path.exists("test_by_detector_%03d.expt" % i)
        assert os.path.exists("test_by_detector_%03d.refl" % i)
    assert not os.path.exists("test_by_detector_%03d.expt" % 2)
    assert not os.path.exists("test_by_detector_%03d.refl" % 2)
    # Now do test when input has identifiers set
    reflections = flex.reflection_table().from_file("combined.refl")
    explist = ExperimentListFactory.from_json_file("combined.expt", check_format=False)
    # set string identifiers as nonconsecutive 0,2,4,6....
    # NOTE: this loop rebinds `exp`, shadowing the ExperimentList above.
    for i, exp in enumerate(explist):
        assert i in reflections["id"]
        reflections.experiment_identifiers()[i] = str(i * 2)
        exp.identifier = str(i * 2)
    reflections.as_file("assigned.refl")
    explist.as_json("assigned.expt")
    result = procrunner.run(
        ["dials.split_experiments", "assigned.expt", "assigned.refl"]
    )
    assert not result.returncode and not result.stderr
    for i in range(len(explist)):
        assert os.path.exists("split_%03d.expt" % i)
        assert os.path.exists("split_%03d.refl" % i)
        exp_single = ExperimentListFactory.from_json_file(
            "split_%03d.expt" % i, check_format=False
        )
        ref_single = flex.reflection_table.from_file("split_%03d.refl" % i)
        assert len(exp_single) == 1
        # resets all ids to 0, but keeps mapping to unique identifier.
        # doesn't have to be set to 0 but doing this to keep more consistent with
        # other dials programs
        assert ref_single["id"].all_eq(0)
        assert ref_single.experiment_identifiers()[0] == str(i * 2)
    # update modded experiments to have same identifiers as assigned_experiments
    moddedlist = ExperimentListFactory.from_json_file("modded.expt", check_format=False)
    for i, exp in enumerate(moddedlist):
        exp.identifier = str(i * 2)
    moddedlist.as_json("modded.expt")
    result = procrunner.run(
        [
            "dials.split_experiments",
            "modded.expt",
            "assigned.refl",
            "output.experiments_prefix=test_by_detector",
            "output.reflections_prefix=test_by_detector",
            "by_detector=True",
        ]
    )
    assert not result.returncode and not result.stderr
    # Expect each datasets to have ids from 0..50 with experiment identifiers
    # all kept from before 0,2,4,6,...
    current_exp_id = 0
    for i in range(2):
        assert os.path.exists("test_by_detector_%03d.expt" % i)
        assert os.path.exists("test_by_detector_%03d.refl" % i)
        explist = ExperimentListFactory.from_json_file(
            "test_by_detector_%03d.expt" % i, check_format=False
        )
        refl = flex.reflection_table.from_file("test_by_detector_%03d.refl" % i)
        for k in range(len(explist)):
            assert refl.experiment_identifiers()[k] == str(current_exp_id)
            current_exp_id += 2
    assert not os.path.exists("test_by_detector_%03d.expt" % 2)
    assert not os.path.exists("test_by_detector_%03d.refl" % 2)
@pytest.mark.parametrize("with_identifiers", [True, False])
def test_combine_clustering(dials_data, tmpdir, with_identifiers):
    """Test with the clustering.use=True option.

    Need to use an integrated dataset for this option.

    Bug fix: the parameter was previously parametrized with the strings
    "True" and "False", both of which are truthy, so the `else`
    (no-identifiers) branch was never exercised. Parametrize with real
    booleans so both code paths are tested.
    """
    data_dir = dials_data("multi_crystal_proteinase_k")
    input_range = [2, 3, 4, 5, 10]
    if with_identifiers:
        # Assign experiment identifiers first, then combine those outputs.
        for n, i in enumerate(input_range):
            command = [
                "dials.assign_experiment_identifiers",
                data_dir.strpath + f"/experiments_{i}.json",
                data_dir.strpath + f"/reflections_{i}.pickle",
                f"output.experiments={n}.expt",
                f"output.reflections={n}.refl",
            ]
            procrunner.run(command, working_directory=tmpdir)
        phil_input = "\n".join(
            (
                " input.experiments=%s\n" % tmpdir.join("%s.expt" % i)
                + " input.reflections=%s" % tmpdir.join("%s.refl" % i)
            )
            for i in [0, 1, 2, 3, 4]
        )
    else:
        # Combine the raw integrated outputs directly.
        phil_input = "\n".join(
            (
                " input.experiments={0}/experiments_%s.json\n"
                + " input.reflections={0}/reflections_%s.pickle"
            )
            % (i, i)
            for i in input_range
        ).format(data_dir.strpath)
    with open(tmpdir.join("input.phil").strpath, "w") as phil_file:
        phil_file.writelines(phil_input)
    result = procrunner.run(
        [
            "dials.combine_experiments",
            tmpdir.join("input.phil").strpath,
            "clustering.use=True",
            "threshold=5",
            "max_clusters=2",
        ],
        working_directory=tmpdir,
    )
    # this should create two clusters:
    # combined_cluster_1 (2 expts)
    # combined_cluster_2 (3 expts)
    assert not result.returncode and not result.stderr
    assert tmpdir.join("combined_cluster2.refl").check()
    assert tmpdir.join("combined_cluster2.expt").check()
    assert tmpdir.join("combined_cluster1.refl").check()
    assert tmpdir.join("combined_cluster1.expt").check()
    exps = load.experiment_list(
        tmpdir.join("combined_cluster1.expt").strpath, check_format=False
    )
    assert len(exps) == 2
    refls = flex.reflection_table.from_file(tmpdir.join("combined_cluster1.refl"))
    assert list(set(refls["id"])) == [0, 1]
    exps = load.experiment_list(
        tmpdir.join("combined_cluster2.expt").strpath, check_format=False
    )
    assert len(exps) == 3
    refls = flex.reflection_table.from_file(tmpdir.join("combined_cluster2.refl"))
    assert list(set(refls["id"])) == [0, 1, 2]
@pytest.fixture
def narrow_wedge_input_with_identifiers(dials_regression, tmpdir):
    """Make a fixture to avoid multiple runs of assign identifiers."""
    data_dir = os.path.join(
        dials_regression, "refinement_test_data", "multi_narrow_wedges"
    )
    sweep_numbers = [9, 11, 12, 31]
    # Assign identifiers to each sweep, writing n.expt / n.refl into tmpdir.
    for n, sweep in enumerate(sweep_numbers):
        procrunner.run(
            [
                "dials.assign_experiment_identifiers",
                os.path.join(data_dir, "data/sweep_%03d/experiments.json" % sweep),
                os.path.join(data_dir, "data/sweep_%03d/reflections.pickle" % sweep),
                f"output.experiments={n}.expt",
                f"output.reflections={n}.refl",
            ],
            working_directory=tmpdir,
        )
    # Build the phil input block referencing the freshly written files.
    pair_lines = []
    for idx, _ in enumerate(sweep_numbers):
        pair_lines.append(
            " input.experiments=%s\n" % tmpdir.join("%s.expt" % idx)
            + " input.reflections=%s" % tmpdir.join("%s.refl" % idx)
        )
    return "\n".join(pair_lines)
@pytest.mark.parametrize("min_refl", ["None", "100"])
@pytest.mark.parametrize("max_refl", ["None", "150"])
def test_min_max_reflections_per_experiment(
    dials_regression, run_in_tmpdir, min_refl, max_refl
):
    """Check filtering of experiments by reflection count while combining."""
    # Expected number of surviving experiments per (min, max) combination.
    expected_results = {
        ("None", "None"): 10,
        ("None", "150"): 9,
        ("100", "None"): 6,
        ("100", "150"): 5,
    }
    data_dir = os.path.join(dials_regression, "refinement_test_data", "multi_stills")
    phil_text = (
        " input.experiments={0}/combined_experiments.json\n"
        + " input.reflections={0}/combined_reflections.pickle\n"
        + " output.min_reflections_per_experiment={1}\n"
        + " output.max_reflections_per_experiment={2}\n"
    ).format(data_dir, min_refl, max_refl)
    with open("input.phil", "w") as phil_file:
        phil_file.writelines(phil_text)
    result = procrunner.run(["dials.combine_experiments", "input.phil"])
    assert not result.returncode and not result.stderr
    # load results
    combined = ExperimentListFactory.from_json_file("combined.expt", check_format=False)
    assert len(combined) == expected_results[(min_refl, max_refl)]
@pytest.mark.parametrize("with_identifiers", [True, False])
@pytest.mark.parametrize("method", ["random", "n_refl", "significance_filter"])
def test_combine_nsubset(
    dials_regression,
    tmpdir,
    with_identifiers,
    method,
    narrow_wedge_input_with_identifiers,
):
    """Test with the n_subset option.

    Bug fix: `with_identifiers` was previously parametrized with the
    strings "True"/"False", which are both truthy, so the no-identifiers
    branch was never run. Parametrize with real booleans instead.
    """
    if with_identifiers:
        phil_input = narrow_wedge_input_with_identifiers
    else:
        data_dir = os.path.join(
            dials_regression, "refinement_test_data", "multi_narrow_wedges"
        )
        input_range = [9, 11, 12, 31]
        phil_input = "\n".join(
            (
                " input.experiments={0}/data/sweep_%03d/experiments.json\n"
                + " input.reflections={0}/data/sweep_%03d/reflections.pickle"
            )
            % (i, i)
            for i in input_range
        ).format(data_dir)
    with open(tmpdir.join("input.phil").strpath, "w") as phil_file:
        phil_file.writelines(phil_input)
    result = procrunner.run(
        [
            "dials.combine_experiments",
            tmpdir.join("input.phil").strpath,
            "n_subset=3",
            f"n_subset_method={method}",
        ],
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr
    assert tmpdir.join("combined.refl").check()
    assert tmpdir.join("combined.expt").check()
    exps = load.experiment_list(
        tmpdir.join("combined.expt").strpath, check_format=False
    )
    assert len(exps) == 3
    refls = flex.reflection_table.from_file(tmpdir.join("combined.refl"))
    # Check that order are the same to ensure consistent for historical
    # use of ordered ids to match across datastructures
    assert list(exps.identifiers()) == list(refls.experiment_identifiers().values())
    assert len(set(refls["id"])) == 3
    assert list(set(refls["id"])) == [0, 1, 2]
def test_failed_tolerance_error(dials_regression, monkeypatch):
    """Test that we get a sensible error message on tolerance failures"""
    # Build the input paths for two sweeps' experiments/reflections files.
    path_template = os.path.join(
        dials_regression,
        "refinement_test_data",
        "multi_narrow_wedges",
        "data",
        "sweep_{:03d}",
        "{}",
    )
    files = []
    for sweep in (2, 3):
        files.append(path_template.format(sweep, "experiments.json"))
        files.append(path_template.format(sweep, "reflections.pickle"))
    # Use the combine script, with output writing disabled.
    script = combine_experiments.Script()
    monkeypatch.setattr(script, "_save_output", lambda *args: None)
    # Parse arguments and configure
    params, options = script.parser.parse_args(files)
    params.reference_from_experiment.beam = 0
    # The unmodified inputs must pass the tolerance check.
    script.run_with_preparsed(params, options)
    # Now, alter the beam to check it doesn't pass
    exp_2 = params.input.experiments[1].data[0]
    exp_2.beam.set_wavelength(exp_2.beam.get_wavelength() * 2)
    with pytest.raises(SystemExit) as exc:
        script.run_with_preparsed(params, options)
    assert "Beam" in str(exc.value)
    print("Got (expected) error message:", exc.value)
def test_combine_imagesets(dials_data, tmp_path):
    """Combining must keep imageset ids and unindexed reflections intact."""
    data = dials_data("l_cysteine_dials_output", pathlib=True)
    args = [
        *sorted(data.glob("*_integrated.pickle")),
        *sorted(data.glob("*_integrated_experiments.json")),
        f"experiments_filename={tmp_path}/combined.expt",
        f"reflections_file={tmp_path}/combined.refl",
    ]
    combine_experiments.run([str(arg) for arg in args])
    combined = flex.reflection_table.from_file(tmp_path / "combined.refl")
    assert set(combined["imageset_id"]) == {0, 1, 2, 3}
    # Unindexed reflections (id == -1) must survive for every imageset.
    unindexed = combined.select(combined["id"] == -1)
    assert set(unindexed["imageset_id"]) == {0, 1, 2, 3}
|
dials/dials
|
tests/command_line/test_combine_experiments.py
|
Python
|
bsd-3-clause
| 15,490
|
[
"CRYSTAL"
] |
1e4dc479d2d5b9663ea40bd9f83aae3d6389d3154ec9788e3d8614159aec2760
|
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
from numpy import *
__version__ = np.__version__
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
    """Return a new matrix of given shape and type, without initializing
    entries.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty matrix.
    dtype : data-type, optional
        Desired output data-type.
    order : {'C', 'F'}, optional
        Whether to store multi-dimensional data in C (row-major) or
        Fortran (column-major) order in memory.

    Returns
    -------
    out : matrix
        Matrix whose entries are whatever happened to be in memory.

    Notes
    -----
    Unlike `zeros`, the values are not initialized, which may be
    marginally faster; the caller must fill the matrix before use.
    """
    # Allocate directly through ndarray.__new__ so no fill pass is done.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    return out
def ones(shape, dtype=None, order='C'):
    """Return a matrix of the given shape and type, filled with ones.

    Parameters
    ----------
    shape : {sequence of ints, int}
        Shape of the matrix
    dtype : data-type, optional
        The desired data-type for the matrix, default is np.float64.
    order : {'C', 'F'}, optional
        Whether to store matrix in C- or Fortran-contiguous order,
        default is 'C'.

    Returns
    -------
    out : matrix
        Matrix of ones of given shape, dtype, and order.

    Notes
    -----
    A scalar or length-one `shape` produces a single-row matrix ``(1, N)``.
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(1)
    return out
def zeros(shape, dtype=None, order='C'):
    """Return a matrix of the given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix
    dtype : data-type, optional
        The desired data-type for the matrix, default is float.
    order : {'C', 'F'}, optional
        Whether to store the result in C- or Fortran-contiguous order,
        default is 'C'.

    Returns
    -------
    out : matrix
        Zero matrix of given shape, dtype, and order.

    Notes
    -----
    A scalar or length-one `shape` produces a single-row matrix ``(1, N)``.
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
def identity(n, dtype=None):
    """Return the square identity matrix of the given size.

    Parameters
    ----------
    n : int
        Size of the returned identity matrix.
    dtype : data-type, optional
        Data-type of the output. Defaults to ``float``.

    Returns
    -------
    out : matrix
        `n` x `n` matrix with ones on the main diagonal and zeros
        elsewhere.

    See Also
    --------
    numpy.identity : Equivalent array function.
    """
    # A length-(n+1) pattern [1, 0, ..., 0] repeated over flat memory of an
    # (n, n) matrix lands a 1 exactly on each diagonal element.
    diagonal_pattern = array([1] + n * [0], dtype=dtype)
    out = ndarray.__new__(matrix, (n, n), dtype)
    out.flat = diagonal_pattern
    return out
def eye(n, M=None, k=0, dtype=float):
    """Return a matrix with ones on the chosen diagonal, zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output, defaults to `n`.
    k : int, optional
        Index of the diagonal: 0 is the main diagonal, positive values
        select an upper diagonal, negative values a lower one.
    dtype : dtype, optional
        Data-type of the returned matrix.

    Returns
    -------
    I : matrix
        An `n` x `M` matrix that is one on the `k`-th diagonal and zero
        everywhere else.

    See Also
    --------
    numpy.eye : Equivalent array function.
    """
    # Delegate to the ndarray implementation, then view as matrix.
    diagonal_array = np.eye(n, M, k, dtype)
    return asmatrix(diagonal_array)
def rand(*args):
    """Return a matrix of random values with the given shape.

    Samples are drawn from a uniform distribution over ``[0, 1)``.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output, given either as N integers or as a single
        tuple. If the first argument is a tuple, any further arguments
        are ignored.

    Returns
    -------
    out : matrix
        Matrix of uniform random values with the requested shape.

    See Also
    --------
    randn, numpy.random.rand
    """
    # Accept both rand(2, 3) and rand((2, 3)) calling conventions.
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.rand(*shape))
def randn(*args):
    """Return a matrix of samples from the standard normal distribution.

    Values are floats drawn from a univariate Gaussian with mean 0 and
    variance 1. For :math:`N(\\mu, \\sigma^2)` samples, use
    ``sigma * np.matlib.randn(...) + mu``.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output, given either as N integers or as a single
        tuple. If the first argument is a tuple, any further arguments
        are ignored.

    Returns
    -------
    Z : matrix of floats
        A matrix of standard-normal samples with the requested shape.

    See Also
    --------
    rand, numpy.random.randn
    """
    # Accept both randn(2, 3) and randn((2, 3)) calling conventions.
    shape = args[0] if isinstance(args[0], tuple) else args
    return asmatrix(np.random.randn(*shape))
def repmat(a, m, n):
    """Repeat a 0-D to 2-D array or matrix M x N times.

    Parameters
    ----------
    a : array_like
        The array or matrix to be repeated.
    m, n : int
        The number of times `a` is repeated along the first and second
        axes.

    Returns
    -------
    out : ndarray
        The tiled result; the subclass of `a` (e.g. matrix) is preserved.
    """
    arr = asanyarray(a)
    # Treat scalars as 1x1 and 1-D inputs as a single row.
    if arr.ndim == 0:
        base_rows, base_cols = 1, 1
    elif arr.ndim == 1:
        base_rows, base_cols = 1, arr.shape[0]
    else:
        base_rows, base_cols = arr.shape
    total_rows = base_rows * m
    total_cols = base_cols * n
    # Repeat whole-row blocks m times, then each row n times, then fold the
    # column repeats back into the final 2-D shape.
    tiled = (
        arr.reshape(1, arr.size)
        .repeat(m, 0)
        .reshape(total_rows, base_cols)
        .repeat(n, 0)
    )
    return tiled.reshape(total_rows, total_cols)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/numpy/matlib.py
|
Python
|
agpl-3.0
| 9,494
|
[
"Gaussian"
] |
e5383f9fa588303d5f2f0ad8ed46781449d34cf6b681b9bfdebe81f2e05f8d41
|
"""Mayavi/traits GUI visualization elements."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import numpy as np
from mayavi.mlab import pipeline, text3d
from mayavi.modules.glyph import Glyph
from mayavi.modules.surface import Surface
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import error
from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
Instance, Array, Bool, Button, Enum, Float, Int, List,
Range, Str, RGBColor)
from traitsui.api import View, Item, HGroup, VGrid, VGroup
from tvtk.api import tvtk
from ..surface import complete_surface_info
from ..transforms import apply_trans
from ..utils import SilenceStdout
from ..viz._3d import _create_mesh_surf, _toggle_mlab_render
# Reusable traitsui group: an embedded 'headview' editor inside a bordered
# "View" box; shared by panels that include a HeadViewController.
headview_borders = VGroup(Item('headview', style='custom', show_label=False),
                          show_border=True, label='View')
class HeadViewController(HasTraits):
    """Set head views for the given coordinate system.

    Parameters
    ----------
    system : 'RAS' | 'ALS' | 'ARI'
        Coordinate system described as initials for directions associated with
        the x, y, and z axes. Relevant terms are: Anterior, Right, Left,
        Superior, Inferior.
    """

    # Coordinate-system convention used to interpret the view buttons.
    system = Enum("RAS", "ALS", "ARI", desc="Coordinate system: directions of "
                  "the x, y, and z axis.")

    # One button per canonical view direction.
    right = Button()
    front = Button()
    left = Button()
    top = Button()
    interaction = Enum('Trackball', 'Terrain')

    # Camera zoom (synced to the camera's parallel_scale once activated).
    scale = Float(0.16)

    scene = Instance(MlabSceneModel)

    view = View(VGrid('0', 'top', '0', Item('scale', label='Scale',
                                            show_label=True),
                      'right', 'front', 'left', 'interaction',
                      show_labels=False, columns=4))

    @on_trait_change('scene.activated')
    def _init_view(self):
        # Parallel projection makes 'scale' map directly onto camera zoom.
        self.scene.parallel_projection = True
        self._trackball_interactor = None

        # apparently scene.activated happens several times
        if self.scene.renderer:
            self.sync_trait('scale', self.scene.camera, 'parallel_scale')
            # and apparently this does not happen by default:
            self.on_trait_change(self.scene.render, 'scale')

    @on_trait_change('interaction')
    def on_set_interaction(self, _, interaction):
        # Switch between trackball and terrain interactor styles, keeping a
        # reference to the original trackball style so it can be restored.
        if self.scene is None:
            return
        if interaction == 'Terrain':
            # Ensure we're in the correct orientation for the
            # InteractorStyleTerrain to have the correct "up"
            if self._trackball_interactor is None:
                self._trackball_interactor = \
                    self.scene.interactor.interactor_style
            self.on_set_view('front', '')
            self.scene.mlab.draw()
            self.scene.interactor.interactor_style = \
                tvtk.InteractorStyleTerrain()
            self.on_set_view('front', '')
            self.scene.mlab.draw()
        else:  # interaction == 'trackball'
            self.scene.interactor.interactor_style = self._trackball_interactor

    @on_trait_change('top,left,right,front')
    def on_set_view(self, view, _):
        # Apply the named canonical view for the current coordinate system.
        if self.scene is None:
            return

        system = self.system
        # (azimuth, elevation, roll) triples per coordinate system and view.
        kwargs = dict(ALS=dict(front=(0, 90, -90),
                               left=(90, 90, 180),
                               right=(-90, 90, 0),
                               top=(0, 0, -90)),
                      RAS=dict(front=(90., 90., 180),
                               left=(180, 90, 90),
                               right=(0., 90, 270),
                               top=(90, 0, 180)),
                      ARI=dict(front=(0, 90, 90),
                               left=(-90, 90, 180),
                               right=(90, 90, 0),
                               top=(0, 180, 90)))
        if system not in kwargs:
            raise ValueError("Invalid system: %r" % system)
        if view not in kwargs[system]:
            raise ValueError("Invalid view: %r" % view)
        kwargs = dict(zip(('azimuth', 'elevation', 'roll'),
                          kwargs[system][view]))
        with SilenceStdout():
            self.scene.mlab.view(distance=None, reset_roll=True,
                                 figure=self.scene.mayavi_scene, **kwargs)
class Object(HasPrivateTraits):
    """Represent a 3d object in a mayavi scene."""

    points = Array(float, shape=(None, 3))
    trans = Array()
    name = Str
    scene = Instance(MlabSceneModel, ())
    src = Instance(VTKDataSource)

    # This should be Tuple, but it is broken on Anaconda as of 2016/12/16
    color = RGBColor()
    point_scale = Float(10, label='Point Scale')
    opacity = Range(low=0., high=1., value=1.)
    visible = Bool(True)

    @on_trait_change('trans,points')
    def _update_points(self):
        """Update the location of the plotted points."""
        if not hasattr(self.src, 'data'):
            return

        trans = self.trans
        if np.any(trans):
            # Interpret `trans` by shape: scalar / length-3 / (1, 3) acts as
            # an elementwise scale, (3, 3) as a linear map, (4, 4) as an
            # affine transform applied via apply_trans().
            if trans.ndim == 0 or trans.shape == (3,) or trans.shape == (1, 3):
                pts = self.points * trans
            elif trans.shape == (3, 3):
                pts = np.dot(self.points, trans.T)
            elif trans.shape == (4, 4):
                pts = apply_trans(trans, self.points)
            else:
                err = ("trans must be a scalar, a length 3 sequence, or an "
                       "array of shape (1,3), (3, 3) or (4, 4). "
                       "Got %s" % str(trans))
                error(None, err, "Display Error")
                raise ValueError(err)
        else:
            pts = self.points
        self.src.data.points = pts
        # Return True so subclasses can tell that an update actually ran
        # (see SurfaceObject._update_points).
        return True
class PointObject(Object):
    """Represent a group of individual points in a mayavi scene."""

    # Show per-point index labels (only meaningful while visible).
    label = Bool(False, enabled_when='visible')
    text3d = List
    glyph = Instance(Glyph)
    resolution = Int(8)

    view = View(HGroup(Item('visible', show_label=False),
                       Item('color', show_label=False),
                       Item('opacity')))

    def __init__(self, view='points', *args, **kwargs):
        """Init.

        Parameters
        ----------
        view : 'points' | 'cloud'
            Whether the view options should be tailored to individual points
            or a point cloud.
        """
        self._view = view
        super(PointObject, self).__init__(*args, **kwargs)

    def default_traits_view(self):  # noqa: D102
        # Build the GUI layout depending on the view mode chosen at init.
        color = Item('color', show_label=False)
        scale = Item('point_scale', label='Size')
        if self._view == 'points':
            visible = Item('visible', label='Show', show_label=True)
            view = View(HGroup(visible, color, scale, 'label'))
        elif self._view == 'cloud':
            visible = Item('visible', show_label=False)
            view = View(HGroup(visible, color, scale))
        else:
            raise ValueError("PointObject(view = %r)" % self._view)
        return view

    @on_trait_change('label')
    def _show_labels(self, show):
        # Rebuild the per-point index labels; rendering is paused meanwhile.
        _toggle_mlab_render(self, False)
        while self.text3d:
            text = self.text3d.pop()
            text.remove()

        if show:
            fig = self.scene.mayavi_scene
            for i, pt in enumerate(np.array(self.src.data.points)):
                x, y, z = pt
                t = text3d(x, y, z, ' %i' % i, scale=.01, color=self.color,
                           figure=fig)
                self.text3d.append(t)
        _toggle_mlab_render(self, True)

    @on_trait_change('visible')
    def _on_hide(self):
        # Hiding the points also hides their labels.
        if not self.visible:
            self.label = False

    @on_trait_change('scene.activated')
    def _plot_points(self):
        """Add the points to the mayavi pipeline"""
        # Imported here to avoid a circular import at module load time.
        from . import _testing_mode

        # Drop any previously plotted glyph/source before re-plotting.
        if hasattr(self.glyph, 'remove'):
            self.glyph.remove()
        if hasattr(self.src, 'remove'):
            self.src.remove()

        _toggle_mlab_render(self, False)
        x, y, z = self.points.T
        scatter = pipeline.scalar_scatter(x, y, z)
        fig = self.scene.mayavi_scene if not _testing_mode() else None
        glyph = pipeline.glyph(scatter, color=self.color,
                               figure=fig,
                               scale_factor=self.point_scale, opacity=1.,
                               resolution=self.resolution)
        glyph.actor.property.backface_culling = True

        self.src = scatter
        self.glyph = glyph
        # Keep GUI traits and mayavi actor properties in sync.
        self.sync_trait('point_scale', self.glyph.glyph.glyph, 'scale_factor')
        self.sync_trait('color', self.glyph.actor.property, mutual=False)
        self.sync_trait('visible', self.glyph)
        self.sync_trait('opacity', self.glyph.actor.property)
        self.on_trait_change(self._update_points, 'points')
        _toggle_mlab_render(self, True)
        # self.scene.camera.parallel_scale = _scale

    def _resolution_changed(self, new):
        # Propagate sphere resolution changes to the glyph source.
        if not self.glyph:
            return

        self.glyph.glyph.glyph_source.glyph_source.phi_resolution = new
        self.glyph.glyph.glyph_source.glyph_source.theta_resolution = new
class SurfaceObject(Object):
    """Represent a solid object in a mayavi scene.

    Notes
    -----
    Doesn't automatically update plot because update requires both
    :attr:`points` and :attr:`tri`. Call :meth:`plot` after updating both
    attributes.
    """

    rep = Enum("Surface", "Wireframe")
    tri = Array(int, shape=(None, 3))

    surf = Instance(Surface)

    view = View(HGroup(Item('visible', show_label=False),
                       Item('color', show_label=False),
                       Item('opacity')))

    def clear(self):  # noqa: D102
        # Remove source and surface from the pipeline, then reset the traits.
        if hasattr(self.src, 'remove'):
            self.src.remove()
        if hasattr(self.surf, 'remove'):
            self.surf.remove()
        self.reset_traits(['src', 'surf'])

    @on_trait_change('scene.activated')
    def plot(self):
        """Add the points to the mayavi pipeline"""
        # Preserve the camera zoom across the re-plot.
        _scale = self.scene.camera.parallel_scale
        self.clear()

        if not np.any(self.tri):
            return

        fig = self.scene.mayavi_scene
        surf = complete_surface_info(dict(rr=self.points, tris=self.tri),
                                     verbose='error')
        src = _create_mesh_surf(surf, fig=fig)

        rep = 'wireframe' if self.rep == 'Wireframe' else 'surface'
        surf = pipeline.surface(src, figure=fig, color=self.color,
                                representation=rep, line_width=1)
        surf.actor.property.backface_culling = True

        self.src = src
        self.surf = surf

        # Keep GUI traits and mayavi actor properties in sync.
        self.sync_trait('visible', self.surf, 'visible')
        self.sync_trait('color', self.surf.actor.property, mutual=False)
        self.sync_trait('opacity', self.surf.actor.property)

        self.scene.camera.parallel_scale = _scale

    @on_trait_change('trans,points')
    def _update_points(self):
        # Object._update_points returns True only if points were replotted.
        if Object._update_points(self):
            self.src.update()  # necessary for SurfaceObject since Mayavi 4.5.0
|
jaeilepp/mne-python
|
mne/gui/_viewer.py
|
Python
|
bsd-3-clause
| 11,226
|
[
"Mayavi"
] |
76b21e956d1ee53c658aafef37634f90cb356b712110b7ee8dfc0c9610a56d9f
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NetcdfCxx4(AutotoolsPackage):
    """NetCDF (network Common Data Form) is a set of software libraries and
    machine-independent data formats that support the creation, access, and
    sharing of array-oriented scientific data. This is the C++ distribution."""

    homepage = "https://www.unidata.ucar.edu/software/netcdf"
    url = "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-cxx4-4.3.1.tar.gz"

    maintainers = ['WardF']

    version('4.3.1', sha256='6a1189a181eed043b5859e15d5c080c30d0e107406fbb212c8fb9814e90f3445')
    version('4.3.0', sha256='e34fbc6aba243ec82c23e9ee99db2430555ada849c54c1f3ab081b0ddd0f5f30')

    # Usually the configure automatically inserts the pic flags, but we can
    # force its usage with this variant.
    variant('static', default=True, description='Enable building static libraries')
    variant('shared', default=True, description='Enable shared library')
    variant('pic', default=True, description='Produce position-independent code (for shared libs)')
    variant('doxygen', default=True, description='Enable doxygen docs')

    depends_on('netcdf-c')

    depends_on('automake', type='build')
    depends_on('autoconf', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    depends_on('doxygen', when='+doxygen', type='build')

    # At least one of the static/shared libraries must be built.
    conflicts('~shared', when='~static')

    force_autoreconf = True

    def flag_handler(self, name, flags):
        # Inject PIC and the netcdf-c include path into the build flags
        # (passed via the build environment; nothing goes on the configure
        # or make command lines).
        if name == 'cflags' and '+pic' in self.spec:
            flags.append(self.compiler.cc_pic_flag)
        elif name == 'cppflags':
            flags.append('-I' + self.spec['netcdf-c'].prefix.include)

        return (None, None, flags)

    @property
    def libs(self):
        # Bug fix: this previously hard-coded shared=True, which made the
        # library query fail for '+static~shared' installs. Honor the
        # 'shared' variant instead.
        shared = '+shared' in self.spec
        return find_libraries(
            'libnetcdf_c++4', root=self.prefix, shared=shared, recursive=True
        )

    def configure_args(self):
        # Translate each boolean variant into its configure switch.
        config_args = []

        if '+static' in self.spec:
            config_args.append('--enable-static')
        else:
            config_args.append('--disable-static')

        if '+shared' in self.spec:
            config_args.append('--enable-shared')
        else:
            config_args.append('--disable-shared')

        if '+pic' in self.spec:
            config_args.append('--with-pic')
        else:
            config_args.append('--without-pic')

        if '+doxygen' in self.spec:
            config_args.append('--enable-doxygen')
        else:
            config_args.append('--disable-doxygen')

        return config_args
|
rspavel/spack
|
var/spack/repos/builtin/packages/netcdf-cxx4/package.py
|
Python
|
lgpl-2.1
| 2,756
|
[
"NetCDF"
] |
743b1fa0e314cd2b9404d314577fd38ef2d8c58219974a9f68c2311d353bd15c
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
from .Options import Options
def get_options():
    """
    Options specific to the legend.

    Returns an Options object holding the legend settings consumed by
    set_options() below.
    """
    opt = Options()
    opt.add('visible', True, "Control the visibility of the legend.")
    # NOTE(review): unlike 'visible' above, several options below pass only
    # (name, doc, vtype) without a default value; presumably Options.add
    # treats the two-argument form as "no default" -- confirm against the
    # Options implementation.
    opt.add('background', "Set the legend background color (defaults to graph background color).",
            vtype=list)
    opt.add('opacity', 0, "The legend background opacity.")
    opt.add('label_color', [1, 1, 1], "The legend text color.")
    opt.add('label_font_size', "The legend label test size in points.", vtype=int)
    opt.add('point', "The location of the legend anchor point.", vtype=list)
    # Alignment values must match vtkChartLegend constants (see set_options,
    # which upper-cases these strings to look up the constant).
    opt.add('horizontal_alignment', 'right', "The horizontal alignment of the legend with respect "
                                             "to the anchor point.",
            vtype=str, allow=['left', 'center', 'right'])
    opt.add('vertical_alignment', 'top', "The vertical alignment of the legend with respect to the "
                                         "anchor point.",
            vtype=str, allow=['top', 'bottom', 'center'])
    opt.add('border', False, "Show the legend border.")
    opt.add('border_color', [1, 1, 1], "The border color.")
    opt.add('border_opacity', 1, "The border opacity.")
    opt.add('border_width', "The border width.", vtype=float)
    return opt
def set_options(vtkchart, vtkrenderer, opt):
    """
    A method for updating the legend options.

    Inputs:
        vtkchart: The vtkChart object whose legend is being configured.
        vtkrenderer: The vtkRenderer, used for the fallback background
                     color and for converting normalized viewport
                     coordinates to viewport (pixel) coordinates.
        opt: An Options object as produced by get_options().
    """
    legend = vtkchart.GetLegend()

    # Legend visibility; shown by default when the option is unset.
    if opt.isOptionValid('visible'):
        vtkchart.SetShowLegend(opt['visible'])
    else:
        vtkchart.SetShowLegend(True)

    # Background color falls back to the renderer background.
    if opt.isOptionValid('background'):
        legend.GetBrush().SetColorF(opt['background'])
    else:
        legend.GetBrush().SetColorF(vtkrenderer.GetBackground())

    legend.GetLabelProperties().SetColor(opt['label_color'])
    legend.GetBrush().SetOpacityF(opt['opacity'])

    if opt.isOptionValid('label_font_size'):
        legend.SetLabelSize(opt['label_font_size'])

    if opt.isOptionValid('point'):
        # An explicit anchor point overrides the alignment options. The
        # point is given in normalized viewport coordinates and converted
        # to viewport coordinates for vtkChartLegend.SetPoint.
        pt = opt['point']
        legend.SetVerticalAlignment(vtk.vtkChartLegend.CUSTOM)
        legend.SetHorizontalAlignment(vtk.vtkChartLegend.CUSTOM)
        coord = vtk.vtkCoordinate()
        coord.SetCoordinateSystemToNormalizedViewport()
        coord.SetValue(pt[0], pt[1], 0)
        loc = coord.GetComputedViewportValue(vtkrenderer)
        legend.SetPoint(*loc)
    else:
        # Map the option strings (e.g. 'top', 'right') onto the matching
        # vtkChartLegend constants. getattr replaces the previous eval()
        # call: identical attribute lookup without executing a string.
        legend.SetVerticalAlignment(
            getattr(vtk.vtkChartLegend, opt['vertical_alignment'].upper()))
        legend.SetHorizontalAlignment(
            getattr(vtk.vtkChartLegend, opt['horizontal_alignment'].upper()))

    if opt.isOptionValid('border'):
        legend.GetPen().SetOpacity(opt['border_opacity'])
        legend.GetPen().SetColorF(opt['border_color'])
        if opt.isOptionValid('border_width'):
            legend.GetPen().SetWidth(opt['border_width'])
    else:
        # No border requested: make the pen fully transparent.
        legend.GetPen().SetOpacity(0)
|
nuclear-wizard/moose
|
python/chigger/utils/LegendOptions.py
|
Python
|
lgpl-2.1
| 3,315
|
[
"MOOSE",
"VTK"
] |
62195e0b2af3c7c7ea2ff4a6a99e85bd3bfc9e252ab7da018c81ed4a6eaa01dd
|
"""
DIRAC.Resources package
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
|
yujikato/DIRAC
|
src/DIRAC/Resources/__init__.py
|
Python
|
gpl-3.0
| 162
|
[
"DIRAC"
] |
0f6e8ecda42389c2c43a9029445910f0ab7f52cc0b8ef93239c757112e186ba9
|
import json
import logging
from difflib import ndiff
import waffle
from django import forms
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.forms.widgets import CheckboxSelectMultiple
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.six import string_types
from django.utils.translation import ugettext
from django.utils.translation import ugettext_lazy as _
from taggit.utils import parse_tags
import kuma.wiki.content
from kuma.core.form_fields import StrippedCharField
from kuma.core.urlresolvers import reverse
from kuma.spam.akismet import AkismetError
from kuma.spam.forms import AkismetCheckFormMixin, AkismetSubmissionFormMixin
from .constants import (DOCUMENT_PATH_RE, INVALID_DOC_SLUG_CHARS_RE,
INVALID_REV_SLUG_CHARS_RE, LOCALIZATION_FLAG_TAGS,
REVIEW_FLAG_TAGS, SLUG_CLEANSING_RE,
SPAM_OTHER_HEADERS, SPAM_SUBMISSION_REVISION_FIELDS,
SPAM_TRAINING_SWITCH)
from .events import EditDocumentEvent
from .models import (Document, DocumentSpamAttempt, DocumentTag, Revision,
RevisionAkismetSubmission, RevisionIP, valid_slug_parent)
from .tasks import send_first_edit_email
# --- Form validation messages -------------------------------------------
# Lazily translated strings used as field error messages below. The
# %(show_value)s / %(limit_value)s placeholders are filled in by Django's
# length validators.
TITLE_REQUIRED = _(u'Please provide a title.')
TITLE_SHORT = _(u'The title is too short (%(show_value)s characters). '
                u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _(u'Please keep the length of the title to %(limit_value)s '
               u'characters or less. It is currently %(show_value)s '
               u'characters.')
TITLE_PLACEHOLDER = _(u'Name Your Article')
TAGS_LONG = _(u'The tags field is too long (%(show_value)s characters). '
              u'Keep the total length to %(limit_value)s characters.')
SLUG_REQUIRED = _(u'Please provide a slug.')
SLUG_INVALID = _(u'The slug provided is not valid.')
SLUG_SHORT = _(u'The slug is too short (%(show_value)s characters). '
               u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _(u'Please keep the length of the slug to %(limit_value)s '
              u'characters or less. It is currently %(show_value)s '
              u'characters.')
SUMMARY_REQUIRED = _(u'Please provide a summary.')
SUMMARY_SHORT = _(u'The summary is too short (%(show_value)s characters). '
                  u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _(u'Please keep the length of the summary to '
                 u'%(limit_value)s characters or less. It is currently '
                 u'%(show_value)s characters.')
CONTENT_REQUIRED = _(u'Please provide content.')
CONTENT_SHORT = _(u'The content is too short (%(show_value)s characters). '
                  u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _(u'Please keep the length of the content to '
                 u'%(limit_value)s characters or less. It is currently '
                 u'%(show_value)s characters.')
COMMENT_LONG = _(u'Please keep the length of the comment to '
                 u'%(limit_value)s characters or less. It is currently '
                 u'%(show_value)s characters.')
# Messages for slug/document collisions and concurrent-edit detection.
SLUG_COLLIDES = _(u'Another document with this slug already exists.')
OTHER_COLLIDES = _(u'Another document with this metadata already exists.')
MIDAIR_COLLISION = _(u'Publishing failed. Conflicting edit attempts detected. '
                     u'Please copy and paste your edits to a safe place and '
                     u'visit the <a href="%(url)s">revision history</a> page '
                     u'to see what was changed before making further edits.')
MOVE_REQUIRED = _(u"Changing this document's slug requires "
                  u"moving it and its children.")

# Module-level logger for the wiki forms.
log = logging.getLogger('kuma.wiki.forms')
class AkismetRevisionData(object):
    """
    Collect Akismet data at creation time or later.

    This can be used in three different scenarios:
    - A user is creating a Document
    - A user is editing a Document
    - A user created or edited a document in the past

    Derived classes customize __init__ to gather data from the relevant
    instances, and then .parameters will have the Akismet submission.
    """

    def __init__(self):
        """Initialize the parameters."""
        self.default_language = settings.WIKI_DEFAULT_LANGUAGE
        # Parameters common to every submission; the set_* helpers and
        # derived classes fill in the rest.
        self.parameters = {
            'blog_charset': 'UTF-8',
            'comment_type': 'wiki-revision'
        }

    def akismet_lang(self, language):
        """
        Convert a Django language name to an Akismet blog_lang identifier.

        E.g.: "en-US" to "en_us"
        """
        return translation.to_locale(language).lower()

    def content_from_form(self, cleaned_data):
        """Create a combined content string from form data."""
        parts = []
        for field in SPAM_SUBMISSION_REVISION_FIELDS:
            value = cleaned_data.get(field, u'')
            if field == 'tags':
                # Normalize the tag string to one tag per line.
                value = self.split_tags(value)
            parts.append(value)
        return u'\n'.join(parts)

    def content_from_document(self, document):
        """Create a combined content string from a document."""
        parts = []
        current_revision = document.current_revision
        assert current_revision, "document must have a current revision."
        for field in SPAM_SUBMISSION_REVISION_FIELDS:
            if field == 'comment':
                # A document has no revision comment of its own.
                value = u''
            elif field == 'content':
                value = current_revision.content
            elif field == 'tags':
                value = self.split_tags(current_revision.tags)
            else:
                value = getattr(document, field, '')
            parts.append(value)
        return u'\n'.join(parts)

    def content_from_revision(self, revision):
        """Create a combined content string from a Revision."""
        parts = []
        for field in SPAM_SUBMISSION_REVISION_FIELDS:
            value = getattr(revision, field) or u''
            if field == 'tags':
                value = self.split_tags(value)
            parts.append(value)
        return u'\n'.join(parts)

    def set_blog(self, request):
        """Set the blog parameter from the request object."""
        self.parameters['blog'] = request.build_absolute_uri('/')

    def set_blog_lang(self, language=None):
        """
        Set the blog_lang from a Django language name.

        If the language is not English, then report that the content may be a
        combination of the target language and untranslated English.
        """
        language = language or self.default_language
        if language == self.default_language:
            blog_lang = self.akismet_lang(language)
        else:
            blog_lang = '%s, %s' % (
                self.akismet_lang(language),
                self.akismet_lang(self.default_language))
        self.parameters['blog_lang'] = blog_lang

    def set_by_edit_request(self, request):
        """
        Add data from the content creator's request object.

        Includes:
        - The base blog address
        - The author information
        - Named HTTP headers
        - Other HTTP headers, as modeled by Akismet's Wordpress plugin:
          https://plugins.trac.wordpress.org/browser/akismet/trunk/class.akismet.php
        """
        self.set_blog(request)
        self.set_comment_author(request.user)
        meta = request.META
        self.parameters.update({
            'referrer': meta.get('HTTP_REFERER', ''),
            'user_agent': meta.get('HTTP_USER_AGENT', ''),
            'user_ip': meta.get('REMOTE_ADDR', ''),
        })
        for key, value in meta.items():
            if not isinstance(value, string_types):
                # Skip non-string WSGI environ values (e.g. file handles).
                continue
            if key.startswith('HTTP_COOKIE'):
                # Never forward cookies to Akismet.
                continue
            if key.startswith('HTTP_') or key in SPAM_OTHER_HEADERS:
                self.parameters[key] = value

    def set_comment_author(self, user):
        """Set the comment author from a User object."""
        self.parameters.update({
            'comment_author': (user.fullname or user.get_full_name() or
                               user.username),
            'comment_author_email': user.email,
        })

    def set_content(self, new_content, existing_content=None):
        """Set comment_content to the new and changed non-empty lines."""
        existing_content = existing_content or u''
        # keepends=True so ndiff compares complete lines; clearer than the
        # positional splitlines(1) that was here before.
        diff = ndiff(existing_content.splitlines(True),
                     new_content.splitlines(True))
        lines = []
        for line in diff:
            if line.startswith('+ '):
                # Only lines added in the new content are submitted.
                diff_content = line[2:].strip()
                if diff_content:
                    lines.append(diff_content)
        self.parameters['comment_content'] = u'\n'.join(lines)

    def set_permalink(self, document, request):
        """Set the permalink for the Document."""
        doc_url = document.get_absolute_url()
        self.parameters['permalink'] = request.build_absolute_uri(doc_url)

    def split_tags(self, tag_string):
        """Turn '"Tag 2" "Tag 1"' into 'Tag 1\nTag 2'."""
        return u'\n'.join(parse_tags(tag_string))
class AkismetNewDocumentData(AkismetRevisionData):
    """Collect Akismet data for a user creating a new document."""

    def __init__(self, request, cleaned_data, language=None):
        """
        Initialize from a new document form submission by the author.

        Keyword Parameters:
        request - the Request for the author
        cleaned_data - the validated form data
        language - the language of the revision being created
        """
        super(AkismetNewDocumentData, self).__init__()
        self.set_by_edit_request(request)
        self.set_blog_lang(language)
        # A new document has no prior content, so every non-empty line
        # of the submission is treated as added content.
        new_content = self.content_from_form(cleaned_data)
        self.set_content(new_content)
class AkismetNewTranslationData(AkismetRevisionData):
    """Collect Akismet data for a user creating a new translation."""

    def __init__(self, request, cleaned_data, english_document, language):
        """
        Initialize from a new translation form submission by the author.

        Keyword Parameters:
        request - the Request for the author
        cleaned_data - the validated form data
        english_document - the original English document
        language - the language of the revision being created
        """
        super(AkismetNewTranslationData, self).__init__()
        self.set_by_edit_request(request)
        self.set_blog_lang(language)
        # Diff against the English original so only the translator's
        # changed lines are submitted to Akismet.
        new_content = self.content_from_form(cleaned_data)
        existing_content = self.content_from_document(english_document)
        self.set_content(new_content, existing_content)
class AkismetEditDocumentData(AkismetRevisionData):
    """Collect Akismet data for a user editing an existing document."""

    def __init__(self, request, cleaned_data, document):
        """
        Initialize from an edit page form submission by the author.

        Keyword Parameters:
        request - the Request for the author
        cleaned_data - the validated form data
        document - the document the user is editing
        """
        super(AkismetEditDocumentData, self).__init__()
        self.set_by_edit_request(request)
        self.set_blog_lang(document.locale)
        self.set_permalink(document, request)
        # Diff against the document's current content so only the edit's
        # changed lines are submitted to Akismet.
        new_content = self.content_from_form(cleaned_data)
        existing_content = self.content_from_document(document)
        self.set_content(new_content, existing_content)
class AkismetHistoricalData(AkismetRevisionData):
    """Collect Akismet data from a historical revision."""

    def __init__(self, revision, request=None):
        """Initialize from a historical revision.

        Keyword Parameters:
        revision - the historical Revision
        request - an optional request object
        """
        assert revision.id, "Must be a saved Revision."
        assert revision.document_id, "Must be a Revision with a Document."
        super(AkismetHistoricalData, self).__init__()
        revision_ip = revision.revisionip_set.first()
        if revision_ip:
            if revision_ip.data:
                # Use captured Akismet submission
                self.parameters = json.loads(revision_ip.data)
                return
            else:
                # Only the request metadata was stored; rebuild the rest.
                self.parameters.update({
                    'user_ip': revision_ip.ip,
                    'user_agent': revision_ip.user_agent,
                    'referrer': revision_ip.referrer,
                })
        else:
            # No RevisionIP record at all: use placeholder request data.
            self.parameters.update({
                'user_ip': '0.0.0.0',
                'user_agent': '',
                'referrer': '',
            })
        document = revision.document
        self.set_blog_lang(document.locale)
        if request:
            self.set_blog(request)
            self.set_permalink(document, request)
        self.set_comment_author(revision.creator)
        # Diff against the previous revision (if any) so only the lines
        # this revision added are submitted.
        new_content = self.content_from_revision(revision)
        old_revision = revision.get_previous()
        if old_revision:
            old_content = self.content_from_revision(old_revision)
        else:
            old_content = None
        self.set_content(new_content, old_content)
class DocumentForm(forms.ModelForm):
    """
    Used for managing the wiki document data model that houses general
    data of a wiki page.
    """
    title = StrippedCharField(min_length=1,
                              max_length=255,
                              widget=forms.TextInput(
                                  attrs={'placeholder': TITLE_PLACEHOLDER}),
                              label=_(u'Title:'),
                              help_text=_(u'Title of article'),
                              error_messages={'required': TITLE_REQUIRED,
                                              'min_length': TITLE_SHORT,
                                              'max_length': TITLE_LONG})
    slug = StrippedCharField(min_length=1,
                             max_length=255,
                             widget=forms.TextInput(),
                             label=_(u'Slug:'),
                             help_text=_(u'Article URL'),
                             error_messages={'required': SLUG_REQUIRED,
                                             'min_length': SLUG_SHORT,
                                             'max_length': SLUG_LONG})
    parent_topic = forms.ModelChoiceField(queryset=Document.objects.all(),
                                          required=False,
                                          label=_(u'Parent:'))
    # Locale is fixed by the view; the user never edits it directly.
    locale = forms.CharField(widget=forms.HiddenInput())

    class Meta:
        model = Document
        fields = ('title', 'slug', 'locale')

    def __init__(self, *args, **kwargs):
        # when creating a new document with a parent, this will be set
        self.parent_slug = kwargs.pop('parent_slug', None)
        super(DocumentForm, self).__init__(*args, **kwargs)

    def clean_slug(self):
        """Validate the slug and prepend the parent slug when given.

        Raises ValidationError for disallowed characters or when the slug
        shadows a reserved (non-document) URL pattern.
        """
        # Imported here to avoid a circular import with kuma.wiki.urls.
        from kuma.wiki.urls import non_document_patterns
        slug = self.cleaned_data['slug']
        if slug == '':
            # Default to the title, if missing.
            slug = self.cleaned_data['title']
        elif self.parent_slug:
            # Prepend parent slug if given from view
            slug = self.parent_slug + '/' + slug
        # check both for disallowed characters and match for the allowed
        if (INVALID_DOC_SLUG_CHARS_RE.search(slug) or
                not DOCUMENT_PATH_RE.search(slug)):
            raise forms.ValidationError(SLUG_INVALID)
        # Guard against slugs that match reserved URL patterns.
        for url_pattern in non_document_patterns:
            if url_pattern.resolve(slug):
                raise forms.ValidationError(SLUG_INVALID)
        return slug

    def save(self, parent=None, *args, **kwargs):
        """Persist the Document form, and return the saved Document."""
        doc = super(DocumentForm, self).save(commit=False, *args, **kwargs)
        doc.parent = parent
        if 'parent_topic' in self.cleaned_data:
            doc.parent_topic = self.cleaned_data['parent_topic']
        doc.save()
        # not strictly necessary since we didn't change
        # any m2m data since we instantiated the doc
        self.save_m2m()
        return doc
class RevisionForm(AkismetCheckFormMixin, forms.ModelForm):
    """
    Form to create new revisions.

    Supports full-page edits, section edits (section_id), and async
    (iframe) submissions, and runs the submission through the Akismet
    spam check via AkismetCheckFormMixin.
    """
    title = StrippedCharField(
        min_length=1,
        max_length=255,
        required=False,
        widget=forms.TextInput(attrs={'placeholder': TITLE_PLACEHOLDER}),
        label=_(u'Title:'),
        help_text=_(u'Title of article'),
        error_messages={
            'required': TITLE_REQUIRED,
            'min_length': TITLE_SHORT,
            'max_length': TITLE_LONG,
        }
    )
    slug = StrippedCharField(
        min_length=1,
        max_length=255,
        required=False,
        widget=forms.TextInput(),
        label=_(u'Slug:'),
        help_text=_(u'Article URL'),
        error_messages={
            'required': SLUG_REQUIRED,
            'min_length': SLUG_SHORT,
            'max_length': SLUG_LONG,
        }
    )
    tags = StrippedCharField(
        required=False,
        max_length=255,
        label=_(u'Tags:'),
        error_messages={
            'max_length': TAGS_LONG,
        }
    )
    keywords = StrippedCharField(
        required=False,
        label=_(u'Keywords:'),
        help_text=_(u'Affects search results'),
    )
    summary = StrippedCharField(
        required=False,
        min_length=5,
        max_length=1000,
        widget=forms.Textarea(),
        label=_(u'Search result summary:'),
        help_text=_(u'Only displayed on search results page'),
        error_messages={
            'required': SUMMARY_REQUIRED,
            'min_length': SUMMARY_SHORT,
            'max_length': SUMMARY_LONG
        },
    )
    content = StrippedCharField(
        min_length=5,
        max_length=300000,
        label=_(u'Content:'),
        widget=forms.Textarea(),
        error_messages={
            'required': CONTENT_REQUIRED,
            'min_length': CONTENT_SHORT,
            'max_length': CONTENT_LONG,
        }
    )
    comment = StrippedCharField(
        max_length=255,
        required=False,
        label=_(u'Comment:')
    )
    review_tags = forms.MultipleChoiceField(
        label=ugettext("Tag this revision for review?"),
        widget=CheckboxSelectMultiple,
        required=False,
        choices=REVIEW_FLAG_TAGS,
    )
    localization_tags = forms.MultipleChoiceField(
        label=ugettext("Tag this revision for localization?"),
        widget=CheckboxSelectMultiple,
        required=False,
        choices=LOCALIZATION_FLAG_TAGS,
    )
    # Hidden field holding the revision id the editor started from; used
    # by clean_current_rev to detect mid-air edit collisions.
    current_rev = forms.CharField(
        required=False,
        widget=forms.HiddenInput(),
    )

    class Meta(object):
        model = Revision
        fields = ('title', 'slug', 'tags', 'keywords', 'summary', 'content',
                  'comment', 'based_on', 'toc_depth',
                  'render_max_age')

    def __init__(self, *args, **kwargs):
        # Extra keyword arguments supplied by the view.
        self.section_id = kwargs.pop('section_id', None)
        self.is_async_submit = kwargs.pop('is_async_submit', None)
        # when creating a new document with a parent, this will be set
        self.parent_slug = kwargs.pop('parent_slug', None)
        super(RevisionForm, self).__init__(*args, **kwargs)
        self.fields['based_on'].widget = forms.HiddenInput()
        if self.instance and self.instance.pk:
            # Ensure both title and slug are populated from parent document,
            # if last revision didn't have them
            if not self.instance.title:
                self.initial['title'] = self.instance.document.title
            if not self.instance.slug:
                self.initial['slug'] = self.instance.document.slug
            content = self.instance.content
            parsed_content = kuma.wiki.content.parse(content)
            parsed_content.injectSectionIDs()
            if self.section_id:
                # Editing a single section: show only that section.
                parsed_content.extractSection(self.section_id)
            parsed_content.filterEditorSafety()
            content = parsed_content.serialize()
            self.initial['content'] = content
            self.initial['review_tags'] = list(self.instance
                                               .review_tags
                                               .names())
            self.initial['localization_tags'] = list(self.instance
                                                     .localization_tags
                                                     .names())
        if self.section_id:
            # Section edits don't submit the TOC depth field.
            self.fields['toc_depth'].required = False

    def clean_slug(self):
        # Since this form can change the URL of the page on which the editing
        # happens, changes to the slug are ignored for an iframe submissions
        if self.is_async_submit:
            return self.instance.document.slug
        # Get the cleaned slug
        slug = self.cleaned_data['slug']
        # first check if the given slug doesn't contain slashes and other
        # characters not allowed in a revision slug component (without parent)
        if slug and INVALID_REV_SLUG_CHARS_RE.search(slug):
            raise forms.ValidationError(SLUG_INVALID)
        # edits can come in without a slug, so default to the current doc slug
        if not slug:
            try:
                slug = self.instance.slug = self.instance.document.slug
            except ObjectDoesNotExist:
                pass
        # then if there is a parent document we prefix the slug with its slug
        if self.parent_slug:
            slug = u'/'.join([self.parent_slug, slug])
        try:
            doc = Document.objects.get(locale=self.instance.document.locale,
                                       slug=slug)
            if self.instance and self.instance.document:
                if (not doc.get_redirect_url() and
                        doc.pk != self.instance.document.pk):
                    # There's another document with this value,
                    # and we're not a revision of it.
                    raise forms.ValidationError(SLUG_COLLIDES)
            else:
                # This document-and-revision doesn't exist yet, so there
                # shouldn't be any collisions at all.
                raise forms.ValidationError(SLUG_COLLIDES)
        except Document.DoesNotExist:
            # No existing document for this value, so we're good here.
            pass
        return slug

    def clean_tags(self):
        """
        Validate the tags ensuring we have no case-sensitive duplicates.
        """
        tags = self.cleaned_data['tags']
        cleaned_tags = []
        if tags:
            for tag in parse_tags(tags):
                # Note: The exact match query doesn't work correctly with
                # MySQL with regards to case-sensitivity. If we move to
                # Postgresql in the future this code may need to change.
                doc_tag = (DocumentTag.objects.filter(name__exact=tag)
                           .values_list('name', flat=True))
                # Write a log we can grep to help find pre-existing duplicate
                # document tags for cleanup.
                if len(doc_tag) > 1:
                    # NOTE(review): log.warn is deprecated in favor of
                    # log.warning in Python 3; left unchanged here.
                    log.warn('Found duplicate document tags: %s' % doc_tag)
                if doc_tag:
                    if doc_tag[0] != tag and doc_tag[0].lower() == tag.lower():
                        # The tag differs only by case. Do not add a new one,
                        # add the existing one.
                        cleaned_tags.append(doc_tag[0])
                        continue
                cleaned_tags.append(tag)
        return ' '.join([u'"%s"' % t for t in cleaned_tags])

    def clean_content(self):
        """
        Validate the content, performing any section editing if necessary
        """
        content = self.cleaned_data['content']
        # If we're editing a section, we need to replace the section content
        # from the current revision.
        if self.section_id and self.instance and self.instance.document:
            # Make sure we start with content form the latest revision.
            full_content = self.instance.document.current_revision.content
            # Replace the section content with the form content.
            parsed_content = kuma.wiki.content.parse(full_content)
            parsed_content.replaceSection(self.section_id, content)
            content = parsed_content.serialize()
        return content

    def clean_current_rev(self):
        """
        If a current revision is supplied in the form, compare it against
        what the document claims is the current revision. If there's a
        difference, then an edit has occurred since the form was constructed
        and we treat it as a mid-air collision.
        """
        current_rev = self.cleaned_data.get('current_rev', None)
        if not current_rev:
            # If there's no current_rev, just bail.
            return current_rev
        try:
            doc_current_rev = self.instance.document.current_revision.id
            # NOTE(review): unicode() is Python 2 only; this code predates a
            # Python 3 port and would need str() there.
            if unicode(current_rev) != unicode(doc_current_rev):
                if (self.section_id and self.instance and
                        self.instance.document):
                    # This is a section edit. So, even though the revision has
                    # changed, it still might not be a collision if the section
                    # in particular hasn't changed.
                    orig_ct = (Revision.objects
                               .get(pk=current_rev)
                               .get_section_content(self.section_id))
                    curr_ct = (self.instance
                               .document.current_revision
                               .get_section_content(self.section_id))
                    if orig_ct != curr_ct:
                        # Oops. Looks like the section did actually get
                        # changed, so yeah this is a collision.
                        url = reverse(
                            'wiki.document_revisions',
                            kwargs={'document_path': self.instance.document.slug}
                        )
                        raise forms.ValidationError(MIDAIR_COLLISION % {'url': url})
                    return current_rev
                else:
                    # No section edit, so this is a flat-out collision.
                    url = reverse(
                        'wiki.document_revisions',
                        kwargs={'document_path': self.instance.document.slug}
                    )
                    raise forms.ValidationError(MIDAIR_COLLISION % {'url': url})
            # NOTE(review): when the revisions match, control falls through
            # and this method implicitly returns None rather than
            # current_rev -- confirm whether that is intended.
        except Document.DoesNotExist:
            # If there's no document yet, just bail.
            return current_rev

    @property
    def akismet_error_message(self):
        # HTML snippet shown to the user when the submission is blocked
        # as suspected spam.
        request = getattr(self, 'request', None)
        user = request and request.user
        return mark_safe(render_to_string('wiki/includes/spam_error.html',
                                          {'user': user}))

    def akismet_error(self, parameters, exception=None):
        """
        Upon errors from the Akismet API records the user, document
        and date of the attempt for further analysis. Then call the
        parent class' error handler.
        """
        try:
            document = self.instance.document
        except ObjectDoesNotExist:
            document = None
        if exception and isinstance(exception, AkismetError):
            # For Akismet errors, save the submission and exception details
            dsa_params = parameters.copy()
            dsa_params['akismet_status_code'] = exception.status_code
            dsa_params['akismet_debug_help'] = exception.debug_help
            dsa_params['akismet_response'] = exception.response.content
            review = DocumentSpamAttempt.AKISMET_ERROR
        else:
            # For detected spam, save the details for review
            dsa_params = parameters
            review = DocumentSpamAttempt.NEEDS_REVIEW
        # Wrapping this in a try/finally to make sure that even if
        # creating a spam attempt object fails we call the parent
        # method that raises a ValidationError
        try:
            DocumentSpamAttempt.objects.create(
                title=self.cleaned_data['title'],
                slug=self.cleaned_data['slug'],
                user=self.request.user,
                document=document,
                data=json.dumps(dsa_params, indent=2, sort_keys=True),
                review=review
            )
        finally:
            # In training mode the error is recorded but not raised.
            if not waffle.switch_is_active(SPAM_TRAINING_SWITCH):
                super(RevisionForm, self).akismet_error(parameters, exception)

    def akismet_parameters(self):
        """
        Returns the parameters for Akismet's check-comment API endpoint.

        The form cleaning also saves the data into the instance, which will
        cause future calls to return different data. The results during the
        initial form cleaning are cached in ._akismet_data, and returned for
        future calls, such as the unit tests.
        """
        if not getattr(self, '_akismet_data', None):
            try:
                document = self.instance.document
            except ObjectDoesNotExist:
                # No document yet: this is a brand-new document.
                self._akismet_data = AkismetNewDocumentData(
                    self.request, self.cleaned_data, self.data.get('locale'))
            else:
                if document.current_revision:
                    self._akismet_data = AkismetEditDocumentData(
                        self.request, self.cleaned_data, document)
                else:
                    # New translation, compare to English document
                    based_on = self.cleaned_data.get('based_on')
                    assert based_on, 'Expected a new translation.'
                    document = based_on.document
                    self._akismet_data = AkismetNewTranslationData(
                        self.request, self.cleaned_data, document,
                        self.data.get('locale'))
        parameters = self._akismet_data.parameters.copy()
        parameters.update(self.akismet_parameter_overrides())
        return parameters

    def akismet_call(self, parameters):
        """
        Skip Akismet check if the content is the same.

        This happens if the edit is to a non-content field, such as
        setting or clearing the technical review flag.
        """
        if not parameters['comment_content']:
            return False  # No content change, not spam
        return super(RevisionForm, self).akismet_call(parameters)

    def save(self, document, **kwargs):
        """
        Persists the revision and returns it.
        Takes the view request and document of the revision.
        Does some specific things when the revision is fully saved.
        """
        # have to check for first edit before we save
        is_first_edit = not self.request.user.wiki_revisions().exists()
        # Making sure we don't commit the saving right away since we
        # want to do other things here.
        kwargs['commit'] = False
        if self.section_id and self.instance and self.instance.document:
            # The logic to save a section is slightly different and may
            # need to evolve over time; a section edit doesn't submit
            # all the fields, and we need to account for that when we
            # construct the new Revision.
            doc = Document.objects.get(pk=self.instance.document.id)
            old_rev = doc.current_revision
            new_rev = super(RevisionForm, self).save(**kwargs)
            new_rev.document = document
            new_rev.creator = self.request.user
            new_rev.toc_depth = old_rev.toc_depth
            new_rev.save()
            new_rev.review_tags.set(*list(old_rev.review_tags.names()))
        else:
            new_rev = super(RevisionForm, self).save(**kwargs)
            new_rev.document = document
            new_rev.creator = self.request.user
            new_rev.toc_depth = self.cleaned_data['toc_depth']
            new_rev.save()
            new_rev.review_tags.set(*self.cleaned_data['review_tags'])
            new_rev.localization_tags.set(*self.cleaned_data['localization_tags'])
        # when enabled store the user's IP address
        if waffle.switch_is_active('store_revision_ips'):
            RevisionIP.objects.log(
                revision=new_rev,
                headers=self.request.META,
                data=json.dumps(self.akismet_parameters(),
                                indent=2, sort_keys=True)
            )
        # send first edit emails
        if is_first_edit:
            send_first_edit_email.delay(new_rev.pk)
        # schedule a document rendering
        document.schedule_rendering('max-age=0')
        # schedule event notifications
        EditDocumentEvent(new_rev).fire(exclude=new_rev.creator)
        return new_rev
class RevisionAkismetSubmissionAdminForm(AkismetSubmissionFormMixin,
                                         forms.ModelForm):
    """
    A model form used in the admin UI to submit missed spam or ham.

    In the Django admin, an admin can both mark a revision as missed spam,
    and correct an incorrectly marked spam.

    The ``AkismetSubmissionFormMixin`` class submits the data to Akismet in
    the ``clean`` method, using the override methods in this and derived
    classes. Users of the form must set the ``sender`` to the request user
    before calling ``is_valid()``.
    """

    class Meta(object):
        model = RevisionAkismetSubmission
        exclude = ['sender', 'sent']

    def akismet_submission_type(self):
        """The submission type is determined from the submitted form data."""
        return self.cleaned_data['type']

    def akismet_parameters(self):
        """
        Returns parameter dict to pass to Akismet's submission API endpoints.
        """
        revision = self.cleaned_data['revision']
        akismet_data = AkismetHistoricalData(revision, self.request)
        return akismet_data.parameters

    def clean(self):
        """Reject the submission early when the revision is invalid.

        The mixin's clean() performs the actual Akismet call, which needs
        a valid revision to build its parameters from.
        """
        if 'revision' not in self.cleaned_data:
            raise forms.ValidationError(
                _('Unable to make the Akismet submission (invalid revision).'),
                code='invalid'
            )
        return super(RevisionAkismetSubmissionAdminForm, self).clean()
class RevisionAkismetSubmissionSpamForm(RevisionAkismetSubmissionAdminForm):
    """
    A model form for submitting missed spam.

    For public dashboards, the only valid submission type is spam, so the
    type is omitted from the form and hard-coded as "spam".
    """

    class Meta(RevisionAkismetSubmissionAdminForm.Meta):
        # 'type' is excluded here in addition to the parent's exclusions.
        exclude = ['sender', 'sent', 'type']

    def akismet_submission_type(self):
        """Force the submission type to spam."""
        return "spam"
class TreeMoveForm(forms.Form):
    """Form for moving a document (and its children) to a new slug."""

    title = StrippedCharField(min_length=1, max_length=255,
                              required=False,
                              widget=forms.TextInput(
                                  attrs={'placeholder': TITLE_PLACEHOLDER}),
                              label=_(u'Title:'),
                              help_text=_(u'Title of article'),
                              error_messages={'required': TITLE_REQUIRED,
                                              'min_length': TITLE_SHORT,
                                              'max_length': TITLE_LONG})
    slug = StrippedCharField(min_length=1, max_length=255,
                             widget=forms.TextInput(),
                             label=_(u'New slug:'),
                             help_text=_(u'New article URL'),
                             error_messages={'required': SLUG_REQUIRED,
                                             'min_length': SLUG_SHORT,
                                             'max_length': SLUG_LONG})
    locale = StrippedCharField(min_length=2, max_length=5,
                               widget=forms.HiddenInput())

    def clean_slug(self):
        """Normalize the destination slug to a bare slug path."""
        slug = self.cleaned_data['slug']
        # We only want the slug here; inputting a full URL would lead
        # to disaster.
        if '://' in slug:
            raise forms.ValidationError('Please enter only the slug to move '
                                        'to, not the full URL.')
        # Removes leading slash and {locale/docs/} if necessary
        # IMPORTANT: This exact same regex is used on the client side, so
        # update both if doing so
        slug = SLUG_CLEANSING_RE.sub('', slug)
        # Remove the trailing slash if one is present, because it
        # will screw up the page move, which doesn't expect one.
        return slug.rstrip('/')

    def clean(self):
        """Check that the slug's parent is valid for the target locale."""
        cleaned_data = super(TreeMoveForm, self).clean()
        if set(['slug', 'locale']).issubset(cleaned_data):
            slug, locale = cleaned_data['slug'], cleaned_data['locale']
            try:
                valid_slug_parent(slug, locale)
            except Exception as e:
                # Surface the model-layer message as a form error.
                raise forms.ValidationError(e.args[0])
        return cleaned_data
class DocumentDeletionForm(forms.Form):
    """Asks for a free-form reason when deleting a document."""
    # autofocus so the reviewer can start typing the reason immediately.
    reason = forms.CharField(widget=forms.Textarea(attrs={'autofocus': 'true'}))
|
jwhitlock/kuma
|
kuma/wiki/forms.py
|
Python
|
mpl-2.0
| 37,759
|
[
"VisIt"
] |
12622de5a7d831f69169045c25ab28b4b9993c70e29dd8749bf813e429fc5e55
|
"""
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
Provides routines for k-means clustering, generating code books
from k-means models, and quantizing vectors by comparing them with
centroids in a code book.
.. autosummary::
:toctree: generated/
whiten -- Normalize a group of observations so each feature has unit variance
vq -- Calculate code book membership of a set of observation vectors
kmeans -- Performs k-means on a set of observation vectors forming k clusters
kmeans2 -- A different implementation of k-means with more methods
-- for initializing centroids
Background information
======================
The k-means algorithm takes as input the number of clusters to
generate, k, and a set of observation vectors to cluster. It
returns a set of centroids, one for each of the k clusters. An
observation vector is classified with the cluster number or
centroid index of the centroid closest to it.
A vector v belongs to cluster i if it is closer to centroid i than
any other centroids. If v belongs to i, we say centroid i is the
dominating centroid of v. The k-means algorithm tries to
minimize distortion, which is defined as the sum of the squared distances
between each observation vector and its dominating centroid. Each
step of the k-means algorithm refines the choices of centroids to
reduce distortion. The change in distortion is used as a
stopping criterion: when the change is lower than a threshold, the
k-means algorithm is not making sufficient progress and
terminates. One can also define a maximum number of iterations.
Since vector quantization is a natural application for k-means,
information theory terminology is often used. The centroid index
or cluster index is also referred to as a "code" and the table
mapping codes to centroids and vice versa is often referred as a
"code book". The result of k-means, a set of centroids, can be
used to quantize vectors. Quantization aims to find an encoding of
vectors that reduces the expected distortion.
All routines expect obs to be a M by N array where the rows are
the observation vectors. The codebook is a k by N array where the
i'th row is the centroid of code word i. The observation vectors
and centroids have the same feature dimension.
As an example, suppose we wish to compress a 24-bit color image
(each pixel is represented by one byte for red, one for blue, and
one for green) before sending it over the web. By using a smaller
8-bit encoding, we can reduce the amount of data by two
thirds. Ideally, the colors for each of the 256 possible 8-bit
encoding values should be chosen to minimize distortion of the
color. Running k-means with k=256 generates a code book of 256
codes, which fills up all possible 8-bit sequences. Instead of
sending a 3-byte value for each pixel, the 8-bit centroid index
(or code word) of the dominating centroid is transmitted. The code
book is also sent over the wire so each 8-bit code can be
translated back to a 24-bit pixel value representation. If the
image of interest was of an ocean, we would expect many 24-bit
blues to be represented by 8-bit codes. If it was an image of a
human face, more flesh tone colors would be represented in the
code book.
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
# TODO:
# - implements high level method for running several times k-means with
# different initializations
# - warning: what happens if different number of clusters ? For now, emit a
# warning, but it is not great, because I am not sure it really make sense to
# succeed in this case (maybe an exception is better ?)
import warnings
from numpy.random import randint
from numpy import (shape, zeros, sqrt, argmin, minimum, array, newaxis,
common_type, single, double, take, std, mean)
import numpy as np
from . import _vq
class ClusterError(Exception):
    """Raised when clustering cannot proceed (e.g. a cluster became empty)."""
    pass
def whiten(obs):
    """
    Normalize a group of observations on a per feature basis.

    Before running k-means, it is beneficial to rescale each feature
    dimension of the observation set with whitening: each feature is
    divided by its standard deviation across all observations, giving it
    unit variance.  Columns whose standard deviation is zero are left
    unchanged (a RuntimeWarning is emitted for them).

    Parameters
    ----------
    obs : ndarray
        Each row of the array is an observation.  The columns are the
        features seen during each observation.

    Returns
    -------
    result : ndarray
        Contains the values in `obs` scaled by the standard deviation
        of each column.

    Examples
    --------
    >>> from scipy.cluster.vq import whiten
    >>> features = np.array([[1.9, 2.3, 1.7],
    ...                      [1.5, 2.5, 2.2],
    ...                      [0.8, 0.6, 1.7,]])
    >>> whiten(features)
    array([[ 4.17944278,  2.69811351,  7.21248917],
           [ 3.29956009,  2.93273208,  9.33380951],
           [ 1.75976538,  0.7038557 ,  7.21248917]])
    """
    per_feature_std = std(obs, axis=0)
    degenerate = per_feature_std == 0
    if degenerate.any():
        warnings.warn("Some columns have standard deviation zero. "
                      "The values of these columns will not change.",
                      RuntimeWarning)
        # Dividing by 1 leaves constant columns untouched.
        per_feature_std[degenerate] = 1.0
    return obs / per_feature_std
def vq(obs, code_book):
    """
    Assign codes from a code book to observations.

    Each observation vector in the 'M' by 'N' `obs` array is compared with
    the centroids in the code book and assigned the code of the closest
    centroid.  The features in `obs` should have unit variance, which can
    be achieved by passing them through the whiten function.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation.  The columns
        are the "features" seen during each observation.  The features
        must be whitened first using the whiten function or something
        equivalent.
    code_book : ndarray
        Usually generated with the k-means algorithm; each row holds a
        different code and the columns are the features of the code.

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between each observation and its
        nearest code.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1.,1.,1.],
    ...                    [2.,2.,2.]])
    >>> features = array([[ 1.9,2.3,1.7],
    ...                   [ 1.5,2.5,2.2],
    ...                   [ 0.8,0.6,1.7]])
    >>> vq(features,code_book)
    (array([1, 1, 0],'i'), array([ 0.43588989,  0.73484692,  0.83066239]))
    """
    ct = common_type(obs, code_book)
    # Cast only when necessary so same-dtype inputs are not copied.
    c_obs = obs if obs.dtype == ct else obs.astype(ct)
    c_code_book = code_book if code_book.dtype == ct else code_book.astype(ct)
    if ct in (single, double):
        # Fast C implementation handles float32/float64.
        return _vq.vq(c_obs, c_code_book)
    # Other dtypes fall back to the pure-Python implementation.
    return py_vq(obs, code_book)
def py_vq(obs, code_book):
    """Pure Python version of the vq algorithm.

    Computes the Euclidean distance between each observation and every
    entry of `code_book`, assigning each observation the index of the
    closest entry.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 2 array.  Each row is one observation.
    code_book : ndarray
        Code book to use.  Same format as `obs`; must have the same
        number of features (i.e. columns) as `obs`.

    Returns
    -------
    code : ndarray
        ``code[i]`` gives the index of the code book entry closest to
        observation ``i``.
    min_dist : ndarray
        ``min_dist[i]`` gives the distance between observation ``i`` and
        its assigned code.

    Notes
    -----
    Slower than the C version but works for all input types; it is used
    as a last resort when the inputs are unsuitable for the C
    implementation.
    """
    if np.ndim(obs) == 1:
        if not np.ndim(obs) == np.ndim(code_book):
            raise ValueError(
                "Observation and code_book should have the same rank")
        return _py_vq_1d(obs, code_book)

    n, d = shape(obs)
    # Observations and codes must agree in rank and feature count.
    if not np.ndim(obs) == np.ndim(code_book):
        raise ValueError("Observation and code_book should have the same rank")
    if not d == code_book.shape[1]:
        raise ValueError("Code book(%d) and obs(%d) should have the same "
                         "number of features (eg columns)""" %
                         (code_book.shape[1], d))

    code = zeros(n, dtype=int)
    min_dist = zeros(n)
    for row in range(n):
        sq_dists = np.sum((obs[row] - code_book) ** 2, 1)
        code[row] = argmin(sq_dists)
        min_dist[row] = sq_dists[code[row]]
    return code, sqrt(min_dist)
def _py_vq_1d(obs, code_book):
""" Python version of vq algorithm for rank 1 only.
Parameters
----------
obs : ndarray
Expects a rank 1 array. Each item is one observation.
code_book : ndarray
Code book to use. Same format than obs. Should rank 1 too.
Returns
-------
code : ndarray
code[i] gives the label of the ith obversation, that its code is
code_book[code[i]].
mind_dist : ndarray
min_dist[i] gives the distance between the ith observation and its
corresponding code.
"""
raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now")
n = obs.size
nc = code_book.size
dist = np.zeros((n, nc))
for i in range(nc):
dist[:, i] = np.sum(obs - code_book[i])
print(dist)
code = argmin(dist)
min_dist = dist[code]
return code, sqrt(min_dist)
def py_vq2(obs, code_book):
    """2nd Python version of vq algorithm.

    Computes the Euclidean distance between every observation and every
    code in a single vectorized broadcast instead of an explicit loop.

    Parameters
    ----------
    obs : ndarray
        Expect a rank 2 array.  Each row is one observation.
    code_book : ndarray
        Code book to use.  Same format as `obs`; must have the same
        number of features (i.e. columns) as `obs`.

    Returns
    -------
    code : ndarray
        ``code[i]`` gives the index of the code closest to observation
        ``i``.
    min_dist : ndarray
        ``min_dist[i]`` gives the distance between observation ``i`` and
        its assigned code.

    Notes
    -----
    This can be faster when the number of codebooks is small, but it
    becomes a real memory hog for large code books: it needs N x M x O
    storage with N observations, M features and O codes.
    """
    d = shape(obs)[1]
    # code books and observations should have same number of features
    if not d == code_book.shape[1]:
        raise ValueError("""
code book(%d) and obs(%d) should have the same
number of features (eg columns)""" % (code_book.shape[1], d))

    deltas = obs[newaxis, :, :] - code_book[:, newaxis, :]
    all_dists = sqrt(np.sum(deltas * deltas, -1))
    code = argmin(all_dists, 0)
    min_dist = minimum.reduce(all_dists, 0)
    return code, min_dist
def _kmeans(obs, guess, thresh=1e-5):
    """ "raw" version of k-means.

    Iteratively refines the code book until the decrease in average
    distortion between two consecutive iterations drops to `thresh` or
    below.

    Returns
    -------
    code_book :
        the lowest distortion codebook found.
    avg_dist :
        the average distance a observation is from a code in the book.
        Lower means the code_book matches the data better.

    See Also
    --------
    kmeans : wrapper around k-means

    Examples
    --------
    Note: not whitened in this example.

    >>> from numpy import array
    >>> from scipy.cluster.vq import _kmeans
    >>> features = array([[ 1.9,2.3],
    ...                   [ 1.5,2.5],
    ...                   [ 0.8,0.6],
    ...                   [ 0.4,1.8],
    ...                   [ 1.0,1.0]])
    >>> book = array((features[0],features[2]))
    >>> _kmeans(features,book)
    (array([[ 1.7       ,  2.4       ],
           [ 0.73333333,  1.13333333]]), 0.40563916697728591)
    """
    code_book = array(guess, copy=True)
    avg_dist = []
    # Seed diff above the threshold so the loop body runs at least once.
    diff = thresh + 1.
    while diff > thresh:
        nc = code_book.shape[0]
        # compute membership and distances between obs and code_book
        obs_code, distort = vq(obs, code_book)
        avg_dist.append(mean(distort, axis=-1))
        # recalc code_book as centroids of associated obs
        # NOTE(review): this condition is always true inside the loop
        # (diff > thresh is the loop condition) — kept as-is.
        if(diff > thresh):
            code_book, has_members = _vq.update_cluster_means(obs, obs_code, nc)
            # Drop centroids that ended up with no member observations.
            code_book = code_book.compress(has_members, axis=0)
        if len(avg_dist) > 1:
            diff = avg_dist[-2] - avg_dist[-1]
    return code_book, avg_dist[-1]
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    The k-means algorithm adjusts the centroids until sufficient progress
    cannot be made, i.e. the change in distortion since the last iteration
    is less than some threshold.  Distortion is defined as the sum of the
    squared differences between the observations and the corresponding
    centroid.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector.  The
        columns are the features seen during each observation.  The
        features must be whitened first with the `whiten` function.
    k_or_guess : int or ndarray
        The number of centroids to generate.  A code is assigned to each
        centroid, which is also the row index of the centroid in the
        code_book matrix generated.  The initial k centroids are chosen
        by randomly selecting observations from the observation matrix.
        Alternatively, passing a k by N array specifies the initial k
        centroids.
    iter : int, optional
        The number of times to run k-means, returning the codebook with
        the lowest distortion.  Ignored if initial centroids are
        specified with an array for ``k_or_guess``.  This parameter does
        not represent the number of iterations of the k-means algorithm.
    thresh : float, optional
        Terminates the k-means algorithm if the change in distortion
        since the last k-means iteration is less than or equal to thresh.

    Returns
    -------
    codebook : ndarray
        A k by N array of k centroids.  The i'th centroid codebook[i] is
        represented with the code i.  The centroids and codes generated
        represent the lowest distortion seen, not necessarily the
        globally minimal distortion.
    distortion : float
        The distortion between the observations passed and the centroids
        generated.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering.
    whiten : must be called prior to passing an observation matrix
        to kmeans.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq, kmeans, whiten
    >>> features = array([[ 1.9,2.3],
    ...                   [ 1.5,2.5],
    ...                   [ 0.8,0.6],
    ...                   [ 0.4,1.8],
    ...                   [ 1.0,1.0]])
    >>> whitened = whiten(features)
    >>> book = array((whitened[0],whitened[2]))
    >>> kmeans(whitened,book)  # doctest: +SKIP
    """
    if int(iter) < 1:
        raise ValueError('iter must be at least 1.')
    # isinstance replaces the fragile `type(x) == type(array([]))` check
    # and also accepts ndarray subclasses (backward-compatible).
    if isinstance(k_or_guess, np.ndarray):
        # Initial centroids supplied directly: run a single k-means pass.
        guess = k_or_guess
        if guess.size < 1:
            raise ValueError("Asked for 0 cluster ? initial book was %s" %
                             guess)
        result = _kmeans(obs, guess, thresh=thresh)
    else:
        # initialize best distance value to a large value
        best_dist = np.inf
        No = obs.shape[0]
        k = k_or_guess
        if k < 1:
            raise ValueError("Asked for 0 cluster ? ")
        for i in range(iter):
            # the initial code book is randomly selected from observations
            guess = take(obs, randint(0, No, k), 0)
            book, dist = _kmeans(obs, guess, thresh=thresh)
            if dist < best_dist:
                best_book = book
                best_dist = dist
        result = best_book, best_dist
    return result
def _kpoints(data, k):
"""Pick k points at random in data (one row = one observation).
This is done by taking the k first values of a random permutation of 1..N
where N is the number of observation.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
"""
if data.ndim > 1:
n = data.shape[0]
else:
n = data.size
p = np.random.permutation(n)
x = data[p[:k], :].copy()
return x
def _krandinit(data, k):
"""Returns k samples of a random variable which parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable which mean and covariances are the one estimated from data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
"""
def init_rank1(data):
mu = np.mean(data)
cov = np.cov(data)
x = np.random.randn(k)
x *= np.sqrt(cov)
x += mu
return x
def init_rankn(data):
mu = np.mean(data, 0)
cov = np.atleast_2d(np.cov(data, rowvar=0))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = np.random.randn(k, mu.size)
x = np.dot(x, np.linalg.cholesky(cov).T) + mu
return x
nd = np.ndim(data)
if nd == 1:
return init_rank1(data)
else:
return init_rankn(data)
_valid_init_meth = {'random': _krandinit, 'points': _kpoints}
def _missing_warn():
"""Print a warning when called."""
warnings.warn("One of the clusters is empty. "
"Re-run kmean with a different initialization.")
def _missing_raise():
    """Abort clustering by raising ClusterError for an empty cluster."""
    message = ("One of the clusters is empty. "
               "Re-run kmean with a different initialization.")
    raise ClusterError(message)
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn'):
    """
    Classify a set of observations into k clusters using the k-means algorithm.

    The algorithm attempts to minimize the Euclidean distance between
    observations and centroids.  Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
        'M' array of 'M' one-dimensional observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate.  If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is
        interpreted as initial cluster to use instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run.  Note
        that this differs in meaning from the iters parameter to
        the kmeans function.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Method for initialization.  Available methods are 'random',
        'points', 'uniform', and 'matrix':

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        'points': choose k observations (rows) at random from data for
        the initial centroids.

        'uniform': generate k observations from the data from a uniform
        distribution defined by the data set (unsupported).

        'matrix': interpret the k parameter as a k by M (or length k
        array for one-dimensional data) array of initial centroids.
    missing : str, optional
        Method to deal with empty clusters.  Available methods are
        'warn' and 'raise':

        'warn': give a warning and continue.

        'raise': raise a ClusterError and terminate the algorithm.

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    """
    # Validate the empty-cluster policy up front.
    if missing not in _valid_miss_meth:
        raise ValueError("Unkown missing method: %s" % str(missing))
    # If data is rank 1, then we have 1 dimension problem.
    nd = np.ndim(data)
    if nd == 1:
        d = 1
    elif nd == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 not supported")

    if np.size(data) < 1:
        raise ValueError("Input has 0 items.")

    # If k is not a single value, it is an explicit initial codebook and
    # must be compatible with data's shape.
    if np.size(k) > 1 or minit == 'matrix':
        if not nd == np.ndim(k):
            raise ValueError("k is not an int and has not same rank than data")
        if d == 1:
            nc = len(k)
        else:
            (nc, dc) = k.shape
            if not dc == d:
                raise ValueError("k is not an int and has not same rank than\
 data")
        clusters = k.copy()
    else:
        try:
            nc = int(k)
        except TypeError:
            raise ValueError("k (%s) could not be converted to an integer " % str(k))

        if nc < 1:
            raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k))

        if not nc == k:
            warnings.warn("k was not an integer, was converted.")
        # Look up the requested initialization scheme and build the
        # initial codebook from the data.
        try:
            init = _valid_init_meth[minit]
        except KeyError:
            raise ValueError("unknown init method %s" % str(minit))
        clusters = init(data, k)

    if int(iter) < 1:
        raise ValueError("iter = %s is not valid. iter must be a positive integer." % iter)

    return _kmeans2(data, clusters, iter, nc, _valid_miss_meth[missing])
def _kmeans2(data, code, niter, nc, missing):
    """ "raw" version of kmeans2.  Do not use directly.

    Runs `niter` iterations of k-means starting from the codebook `code`,
    calling `missing()` whenever a cluster loses all of its members.
    """
    for _ in range(niter):
        # Assign every observation to its nearest current centroid.
        label = vq(data, code)[0]
        # Recompute the centroids from the new assignment.
        new_code, has_members = _vq.update_cluster_means(data, label, nc)
        if not has_members.all():
            missing()
            # Set the empty clusters to their previous positions
            new_code[~has_members] = code[~has_members]
        code = new_code
    return code, label
|
mortonjt/scipy
|
scipy/cluster/vq.py
|
Python
|
bsd-3-clause
| 25,312
|
[
"Gaussian"
] |
860a4b11ba3bc6e3c2e2ef214629499b1b652030ec719b9416ee6792bdbce5a3
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
from collections import defaultdict, deque
from typing import Any, Text, List, Dict
from rasa_core.events import UserUttered, ActionExecuted
from rasa_core.interpreter import RegexInterpreter
from rasa_core.training_utils import STORY_START
from rasa_core.training_utils.story_graph import StoryGraph
from rasa_nlu.training_data import TrainingData
EDGE_NONE_LABEL = "NONE"
class UserMessageGenerator(object):
    """Picks real training messages to stand in for parsed user intents."""

    def __init__(self, training_data):
        self.training_data = training_data
        self.mapping = self._create_reverse_mapping(self.training_data)

    def _create_reverse_mapping(self, data):
        # type: (TrainingData) -> Dict[Text, List[Dict[Text, Any]]]
        """Create a mapping from intent to messages

        This allows a faster intent lookup."""
        intent_to_examples = defaultdict(list)
        for example in data.training_examples:
            intent = example.get("intent", {})
            if intent is not None:
                intent_to_examples[intent].append(example)
        return intent_to_examples

    @staticmethod
    def _contains_same_entity(entities, e):
        # True when the example lacks this entity or carries a
        # different value for it.
        found_value = entities.get(e.get("entity"))
        return found_value is None or found_value != e.get("value")

    def message_for_data(self, structured_info):
        """Find a data sample with the same intent and entities.

        Given the parsed data from a message (intent and entities) finds a
        message in the data that has the same intent and entities."""
        if structured_info.get("intent") is not None:
            intent_name = structured_info.get("intent", {}).get("name")
            candidates = self.mapping.get(intent_name, [])[:]
            random.shuffle(candidates)
            for example in candidates:
                example_entities = {e.get("entity"): e.get("value")
                                    for e in example.get("entities", [])}
                for e in structured_info.get("entities", []):
                    if self._contains_same_entity(example_entities, e):
                        break
                else:
                    return example.text
        return structured_info.get("text")
def _fingerprint_node(G, node, max_history):
"""Fingerprint a node in a graph.
Can be used to identify nodes that are similar and can be merged within the
graph.
Generates all paths starting at `node` following the directed graph up to
the length of `max_history`, and returns a set of strings describing the
found paths. If the fingerprint creation for two nodes results in the same
sets these nodes are indistinguishable if we walk along the path and only
remember max history number of nodes we have visited. Hence, if we randomly
walk on our directed graph, always only remembering the last `max_history`
nodes we have visited, we can never remember if we have visited node A or
node B if both have the same fingerprint. """
# the candidate list contains all node paths that haven't been
# extended till `max_history` length yet.
candidates = deque()
candidates.append([node])
continuations = []
while len(candidates) > 0:
candidate = candidates.pop()
last = candidate[-1]
empty = True
for _, succ_node in G.out_edges(last):
next_candidate = candidate[:]
next_candidate.append(succ_node)
# if the path is already long enough, we add it to the results,
# otherwise we add it to the candidates
# that we still need to visit
if len(next_candidate) == max_history:
continuations.append(next_candidate)
else:
candidates.append(next_candidate)
empty = False
if empty:
continuations.append(candidate)
return {" - ".join([G.node[node]["label"]
for node in continuation])
for continuation in continuations}
def _incoming_edges(G, node):
return {(prev_node, k) for prev_node, _, k in G.in_edges(node, keys=True)}
def _outgoing_edges(G, node):
return {(succ_node, k) for _, succ_node, k in G.out_edges(node, keys=True)}
def _outgoing_edges_are_similar(G, node_a, node_b):
    """If the outgoing edges from the two nodes are similar enough,
    it doesn't matter if you are in a or b.

    As your path will be the same because the outgoing edges will lead
    you to the same nodes anyways."""
    skip = {node_a, node_b}
    edges_a = {(target, key) for target, key in _outgoing_edges(G, node_a)
               if target not in skip}
    edges_b = {(target, key) for target, key in _outgoing_edges(G, node_b)
               if target not in skip}
    # Treat a node with no (external) outgoing edges as mergeable too.
    return not edges_a or not edges_b or edges_a == edges_b
def _nodes_are_equivalent(G, node_a, node_b, max_history):
    """Decides if two nodes are equivalent based on their fingerprints."""
    if G.node[node_a]["label"] != G.node[node_b]["label"]:
        return False
    # Same-label nodes are mergeable when any of the following holds:
    # their outgoing edges lead to the same places, their incoming edges
    # come from the same places, or their walk fingerprints match.
    return (_outgoing_edges_are_similar(G, node_a, node_b) or
            _incoming_edges(G, node_a) == _incoming_edges(G, node_b) or
            _fingerprint_node(G, node_a, max_history) ==
            _fingerprint_node(G, node_b, max_history))
def _add_edge(G, u, v, key, label=None):
    """Add an edge from `u` to `v` unless it is already present.

    The key distinguishes parallel edges; `None` keys are normalized to
    the EDGE_NONE_LABEL key with an empty label."""
    if key is None or key == EDGE_NONE_LABEL:
        # Can't use `None` as a label
        if not G.has_edge(u, v, key=EDGE_NONE_LABEL):
            G.add_edge(u, v, key=EDGE_NONE_LABEL, label="")
        return
    if not G.has_edge(u, v, key):
        G.add_edge(u, v, key=key, label=label)
def _merge_equivalent_nodes(G, max_history):
    """Searches for equivalent nodes in the graph and merges them."""
    changed = True
    # every node merge changes the graph and can trigger previously
    # impossible node merges - we need to repeat until
    # the graph doesn't change anymore
    while changed:
        changed = False
        # Only positive node ids are merge candidates; the START (0) and
        # END (-1) sentinel nodes are never merged.
        remaining_node_ids = [n for n in G.nodes() if n > 0]
        for idx, i in enumerate(remaining_node_ids):
            if G.has_node(i):
                for j in remaining_node_ids[
                        idx + 1:]:  # assumes node equivalence is cumulative
                    if G.has_node(j) and \
                            _nodes_are_equivalent(G, i, j, max_history):
                        changed = True
                        # moves all outgoing edges to the other node
                        j_outgoing_edges = G.out_edges(j, keys=True, data=True)
                        for _, succ_node, k, d in j_outgoing_edges:
                            _add_edge(G, i, succ_node, k, d.get("label"))
                            G.remove_edge(j, succ_node)
                        # moves all incoming edges to the other node
                        j_incoming_edges = G.in_edges(j, keys=True, data=True)
                        for prev_node, _, k, d in j_incoming_edges:
                            _add_edge(G, prev_node, i, k, d.get("label"))
                            G.remove_edge(prev_node, j)
                        G.remove_node(j)
def _replace_edge_labels_with_nodes(G, next_id, interpreter, training_data):
    """User messages are created as edge labels. This removes the labels and
    creates nodes instead.

    The algorithms (e.g. merging) are simpler if the user messages are
    labels on the edges. But it sometimes looks better if in the final
    graphs the user messages are nodes instead of edge labels."""
    if training_data:
        message_generator = UserMessageGenerator(training_data)
    else:
        message_generator = None
    # NOTE(review): this iterates the edge list while removing edges; it
    # relies on G.edges(...) returning a materialized list (networkx 1.x
    # behavior) — confirm before upgrading networkx.
    for s, e, k, d in G.edges(keys=True, data=True):
        if k != EDGE_NONE_LABEL:
            if message_generator and d.get("label", k) is not None:
                # Replace the raw story text with an actual example
                # message from the NLU training data.
                parsed_info = interpreter.parse(d.get("label", k))
                label = message_generator.message_for_data(parsed_info)
            else:
                label = d.get("label", k)
            next_id += 1
            G.remove_edge(s, e, k)
            # Insert a message node between the two action nodes.
            G.add_node(next_id, label=label, style="filled",
                       fillcolor="lightblue", shape="box")
            G.add_edge(s, next_id)
            G.add_edge(next_id, e)
def _persist_graph(G, output_file):
    """Render the graph with graphviz and write it to `output_file`.

    Requires graphviz (and pygraphviz) to be installed."""
    import networkx as nx

    agraph = nx.nx_agraph.to_agraph(G)  # convert to a graphviz graph
    agraph.layout("dot",
                  args="-Goverlap=false -Gsplines=true -Gconcentrate=true")
    agraph.draw(output_file)
def visualize_stories(story_steps,
                      output_file=None,
                      max_history=2,
                      interpreter=RegexInterpreter(),
                      training_data=None):
    """Given a set of stories, generates a graph visualizing the flows in the
    stories.

    Visualization is always a trade off between making the graph as small
    as possible while at the same time making sure the meaning doesn't
    change to "much". The algorithm will compress the graph generated
    from the stories to merge nodes that are similar. Hence, the
    algorithm might create paths through the graph that aren't actually
    specified in the stories, but we try to minimize that.

    Output file defines if and where a file containing the plotted graph
    should be stored.

    The history defines how much 'memory' the graph has. This influences
    in which situations the algorithm will merge nodes. Nodes will only
    be merged if they are equal within the history, this means the larger
    the history is we take into account the less likely it is we merge
    any nodes.

    The training data parameter can be used to pass in a Rasa NLU
    training data instance. It will be used to replace the user messages
    from the story file with actual messages from the training data."""
    import networkx as nx

    story_graph = StoryGraph(story_steps)
    G = nx.MultiDiGraph()
    next_node_idx = 0
    # Sentinel nodes: 0 is START, -1 is END.
    G.add_node(0, label="START", fillcolor="green", style="filled")
    G.add_node(-1, label="END", fillcolor="red", style="filled")

    # For each checkpoint name, the graph nodes a story can be at when it
    # reaches that checkpoint.
    checkpoint_indices = defaultdict(list)
    checkpoint_indices[STORY_START] = [0]
    for step in story_graph.ordered_steps():
        current_nodes = checkpoint_indices[step.start_checkpoint]
        message = None
        for el in step.events:
            if isinstance(el, UserUttered):
                # Remember the parsed user message; it becomes the label
                # of the edge to the next action node.
                message = interpreter.parse(el.text)
            elif isinstance(el, ActionExecuted):
                if message:
                    message_key = message.get("intent", {}).get("name", None)
                    message_label = message.get("text", None)
                else:
                    message_key = None
                    message_label = None

                next_node_idx += 1
                G.add_node(next_node_idx, label=el.action_name)
                for current_node in current_nodes:
                    _add_edge(G, current_node, next_node_idx, message_key,
                              message_label)
                current_nodes = [next_node_idx]
                message = None
        if not step.end_checkpoint:
            # Story ends here: connect to the END sentinel node.
            for current_node in current_nodes:
                G.add_edge(current_node, -1, key=EDGE_NONE_LABEL)
        else:
            checkpoint_indices[step.end_checkpoint].extend(current_nodes)

    _merge_equivalent_nodes(G, max_history)
    _replace_edge_labels_with_nodes(
        G, next_node_idx, interpreter, training_data)

    if output_file:
        _persist_graph(G, output_file)
    return G
|
deepak02/rasa_core
|
rasa_core/training_utils/visualization.py
|
Python
|
apache-2.0
| 11,796
|
[
"VisIt"
] |
065a9a3d58986a52e762c6a0ac15fed0d3d88b4a95117883fe280deb906e49dd
|
#! /usr/bin/env python3
import textwrap
import re
def regex_rules(words, extract=0):
    """A function which returns the regular expression (regex) pattern for the
    provided keywords.

    Parameters
    ----------
    words : list
        A list of keywords or regex patterns.
    extract : int, default 0
        - 0 : Only the provided keywords or regex patterns.
        - 1 : Including words/word adjacent to the keywords or regex patterns.
        - 2 : Including the whole sentence having the keywords or regex
              patterns. It assumes a sentence is sandwiched by two periods
              or start/end boundaries.

    Returns
    -------
    patterns : list
        A list of regex rules containing the keywords in the 'words',
        according to the rules defined by 'extract'.

    Raises
    ------
    ValueError
        If 'extract' is not one of 0, 1, 2.  (Previously an unsupported
        value crashed later with a NameError on the unbound 'pattern'.)
    """
    assert type(words) is list, "words is not in a list format"
    for word in words:
        assert type(word) is str, "words must have elements of strings"
    # Validate up front instead of leaving 'pattern' unbound below.
    if extract not in (0, 1, 2):
        raise ValueError("extract must be 0, 1 or 2, got {!r}".format(extract))
    patterns = []
    for word in words:
        # Raw strings are used so '\b', '\w', '\s', '\.' reach the regex
        # engine verbatim (non-raw '\w' etc. is a deprecated invalid escape).
        if extract == 0:
            pattern = r'\b(' + word + r')\b'
        elif extract == 1:
            # (?: ) keeps the groups non-capturing so findall returns the
            # whole match, not a backreference.
            pattern = r'(?:\w+\s+|\b)(?:' + word + r')(?:\s+\w+|\b)'
        else:
            pattern = r'(?:\.?|^)[^.]*\b(?:' + word + r')\b[^.]*(?:\.?|$)'
        patterns.append(pattern)
    return patterns
def regex(text, keyword, rule):
    """A simple function used to test the text extraction
    based on the designed regular expression patterns.

    Parameters
    ----------
    text : str
        The input text.
    keyword : list
        A list of keywords or regex patterns.
    rule : int
        Same as the 'extract' in the regex_rules function.
    """
    print(text)
    compiled_rules = regex_rules(keyword, rule)
    print(compiled_rules)
    # Show the case-insensitive matches of each pattern, skipping patterns
    # that matched nothing.
    for rule_pattern in compiled_rules:
        found = re.findall(rule_pattern, text, re.I)
        if found:
            print(found)
    print('*' * 80)
if __name__ == "__main__":
    # Demo driver: exercise each extraction rule against small sample texts.
    rule0 = 0  # keyword only
    rule1 = 1  # keyword plus adjacent word(s)
    rule2 = 2  # whole sentence containing the keyword
    text1 = textwrap.dedent("""
    Johnathan likes to play badminton. John likes to play basketball.
    Max likes computer games, but Johnathan prefer board games instead.
    Michael and Max love to play soccer.
    """)
    # Alternation inside a keyword is allowed ('John|Max').
    keyword1 = ['John|Max', 'games']
    regex(text1, keyword1, rule0)
    regex(text1, keyword1, rule1)
    text0 = textwrap.dedent("""
    I visited the company a few years ago.
    They visit ABC company in this coming
    weekend, and they would like to have another visit. Visit to ABC company
    makes people feeling great. I am keen to arrange a
    new visit
    by the end of the year.
    """)
    keyword0 = ['visit']
    regex(text0, keyword0, rule1)
    regex(text0, keyword0, rule2)
    text2 = textwrap.dedent("""
    Hi there, we are from an international company. We provide excellent
    services and some of our clients are from MNC. We have been in the
    business for more than 50 years.
    Enroll into a yearly contract with us and get your air-conditioning
    serviced at $25 per unit.
    """)
    # A full regex as "keyword": price-like figures followed by a per-unit
    # phrase, excluding hourly rates.
    keyword2 = ['(?:\$?\d+\.?\d?\d?|price|charges?)(?: is| nett| per)?(?:/| per | an | one | half | every | each )(?:unit|(?!hour |hr )\w+)\\b']
    regex(text2, keyword2, rule2)
|
JQGoh/jqlearning
|
Analysis/StringPatternsWithKeywords/main.py
|
Python
|
gpl-3.0
| 3,448
|
[
"VisIt"
] |
4b5d999342d3632f733b2fb33aa6305b3134487e495eb553fd6d57859381ec91
|
# -*- coding: utf-8 -*-
"""Tests for reserved field handling in header
"""
from vcfpy import header
# INFO for (small) variants ---------------------------------------------------
# Each test fetches the reserved-INFO entry once and checks that it exists,
# has the expected VCF type and Number, and carries a description.
def test_info_aa():
    entry = header.RESERVED_INFO["AA"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_info_ac():
    entry = header.RESERVED_INFO["AC"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "A"
    assert entry.description


def test_info_ad():
    entry = header.RESERVED_INFO["AD"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "R"
    assert entry.description


def test_info_adf():
    entry = header.RESERVED_INFO["ADF"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "R"
    assert entry.description


def test_info_adr():
    entry = header.RESERVED_INFO["ADR"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "R"
    assert entry.description


def test_info_an():
    entry = header.RESERVED_INFO["AN"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_bq():
    entry = header.RESERVED_INFO["BQ"]
    assert entry
    assert entry.type == "Float"
    assert entry.number == 1
    assert entry.description


def test_info_cigar():
    entry = header.RESERVED_INFO["CIGAR"]
    assert entry
    assert entry.type == "String"
    assert entry.number == "A"
    assert entry.description
def test_info_db():
    # BUGFIX: this test was misnamed test_info_DP although it checks the
    # "DB" (dbSNP membership) entry — test_info_dp below already covers
    # "DP".  Renamed for consistency with the other lowercase test names;
    # no caller references a test function by name.
    assert header.RESERVED_INFO["DB"]
    assert header.RESERVED_INFO["DB"].type == "Flag"
    assert header.RESERVED_INFO["DB"].number == 0
    assert header.RESERVED_INFO["DB"].description
# Remaining reserved INFO entries for small variants: fetch once, then
# verify existence, type, Number and description.
def test_info_dp():
    entry = header.RESERVED_INFO["DP"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_h2():
    entry = header.RESERVED_INFO["H2"]
    assert entry
    assert entry.type == "Flag"
    assert entry.number == 0
    assert entry.description


def test_info_h3():
    entry = header.RESERVED_INFO["H3"]
    assert entry
    assert entry.type == "Flag"
    assert entry.number == 0
    assert entry.description


def test_info_mq():
    entry = header.RESERVED_INFO["MQ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_mq0():
    entry = header.RESERVED_INFO["MQ0"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_ns():
    entry = header.RESERVED_INFO["NS"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_sb():
    entry = header.RESERVED_INFO["SB"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 4
    assert entry.description


def test_info_somatic():
    entry = header.RESERVED_INFO["SOMATIC"]
    assert entry
    assert entry.type == "Flag"
    assert entry.number == 0
    assert entry.description


def test_info_validated():
    entry = header.RESERVED_INFO["VALIDATED"]
    assert entry
    assert entry.type == "Flag"
    assert entry.number == 0
    assert entry.description


def test_info_1000g():
    entry = header.RESERVED_INFO["1000G"]
    assert entry
    assert entry.type == "Flag"
    assert entry.number == 0
    assert entry.description
# INFO for SVs ----------------------------------------------------------------
# Reserved INFO entries for structural variants: fetch once, then verify
# existence, type, Number and description.
def test_info_imprecise():
    entry = header.RESERVED_INFO["IMPRECISE"]
    assert entry
    assert entry.type == "Flag"
    assert entry.number == 0
    assert entry.description


def test_info_novel():
    entry = header.RESERVED_INFO["NOVEL"]
    assert entry
    assert entry.type == "Flag"
    assert entry.number == 0
    assert entry.description


def test_info_end():
    entry = header.RESERVED_INFO["END"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_svtype():
    entry = header.RESERVED_INFO["SVTYPE"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_info_svlen():
    entry = header.RESERVED_INFO["SVLEN"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_cipos():
    entry = header.RESERVED_INFO["CIPOS"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 2
    assert entry.description


def test_info_ciend():
    entry = header.RESERVED_INFO["CIEND"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 2
    assert entry.description


def test_info_homlen():
    entry = header.RESERVED_INFO["HOMLEN"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "."
    assert entry.description


def test_info_homseq():
    entry = header.RESERVED_INFO["HOMSEQ"]
    assert entry
    assert entry.type == "String"
    assert entry.number == "."
    assert entry.description


def test_info_bkptid():
    entry = header.RESERVED_INFO["BKPTID"]
    assert entry
    assert entry.type == "String"
    assert entry.number == "."
    assert entry.description


def test_info_meinfo():
    entry = header.RESERVED_INFO["MEINFO"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 4
    assert entry.description


def test_info_metrans():
    entry = header.RESERVED_INFO["METRANS"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 4
    assert entry.description


def test_info_dgvid():
    entry = header.RESERVED_INFO["DGVID"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_info_dbvarid():
    entry = header.RESERVED_INFO["DBVARID"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_info_dbripid():
    entry = header.RESERVED_INFO["DBRIPID"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_info_mateid():
    entry = header.RESERVED_INFO["MATEID"]
    assert entry
    assert entry.type == "String"
    assert entry.number == "."
    assert entry.description


def test_info_parid():
    entry = header.RESERVED_INFO["PARID"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_info_event():
    entry = header.RESERVED_INFO["EVENT"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_info_cilen():
    entry = header.RESERVED_INFO["CILEN"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 2
    assert entry.description


def test_info_dpadj():
    entry = header.RESERVED_INFO["DPADJ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "."
    assert entry.description


def test_info_cn():
    entry = header.RESERVED_INFO["CN"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_info_cnadj():
    entry = header.RESERVED_INFO["CNADJ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "."
    assert entry.description


def test_info_cicn():
    entry = header.RESERVED_INFO["CICN"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 2
    assert entry.description


def test_info_cicnadj():
    entry = header.RESERVED_INFO["CICNADJ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "."
    assert entry.description
# FORMAT for (small) variants -------------------------------------------------
# Reserved FORMAT entries for small variants: fetch once, then verify
# existence, type, Number and description.
def test_format_ad():
    entry = header.RESERVED_FORMAT["AD"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "R"
    assert entry.description


def test_format_adf():
    entry = header.RESERVED_FORMAT["ADF"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "R"
    assert entry.description


def test_format_adr():
    entry = header.RESERVED_FORMAT["ADR"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "R"
    assert entry.description


def test_format_dp():
    entry = header.RESERVED_FORMAT["DP"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_format_ec():
    entry = header.RESERVED_FORMAT["EC"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "A"
    assert entry.description


def test_format_ft():
    entry = header.RESERVED_FORMAT["FT"]
    assert entry
    assert entry.type == "String"
    assert entry.number == "1"
    assert entry.description


def test_format_gq():
    entry = header.RESERVED_FORMAT["GQ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "G"
    assert entry.description


def test_format_gp():
    entry = header.RESERVED_FORMAT["GP"]
    assert entry
    assert entry.type == "Float"
    assert entry.number == "G"
    assert entry.description


def test_format_gt():
    entry = header.RESERVED_FORMAT["GT"]
    assert entry
    assert entry.type == "String"
    assert entry.number == 1
    assert entry.description


def test_format_gl():
    entry = header.RESERVED_FORMAT["GL"]
    assert entry
    assert entry.type == "Float"
    assert entry.number == "G"
    assert entry.description


def test_format_hq():
    entry = header.RESERVED_FORMAT["HQ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 2
    assert entry.description


def test_format_mq():
    entry = header.RESERVED_FORMAT["MQ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_format_pl():
    entry = header.RESERVED_FORMAT["PL"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == "G"
    assert entry.description


def test_format_pq():
    entry = header.RESERVED_FORMAT["PQ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_format_ps():
    entry = header.RESERVED_FORMAT["PS"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description
# FORMAT for SVs --------------------------------------------------------------
# Reserved FORMAT entries for structural variants: fetch once, then verify
# existence, type, Number and description.
def test_format_cn():
    entry = header.RESERVED_FORMAT["CN"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_format_cnq():
    entry = header.RESERVED_FORMAT["CNQ"]
    assert entry
    assert entry.type == "Float"
    assert entry.number == 1
    assert entry.description


def test_format_cnl():
    entry = header.RESERVED_FORMAT["CNL"]
    assert entry
    assert entry.type == "Float"
    assert entry.number == "G"
    assert entry.description


def test_format_cnp():
    entry = header.RESERVED_FORMAT["CNP"]
    assert entry
    assert entry.type == "Float"
    assert entry.number == "G"
    assert entry.description


def test_format_nq():
    entry = header.RESERVED_FORMAT["NQ"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_format_hap():
    entry = header.RESERVED_FORMAT["HAP"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description


def test_format_ahap():
    entry = header.RESERVED_FORMAT["AHAP"]
    assert entry
    assert entry.type == "Integer"
    assert entry.number == 1
    assert entry.description
|
bihealth/vcfpy
|
tests/test_reserved_fields.py
|
Python
|
mit
| 15,248
|
[
"ADF"
] |
7cf8177c15680f08ca65d061cfa8bcf7452bd39037055fad223669b06926cc99
|
# Original Author: iwatobipen
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
"""
This script performs fast clustering of SMILES
Clustering method is repeated bi section, the method looks like -k-means.
To use this script, the user needs to install bayon at first.
Input format: Tab separated SMILES strings (SMILES \t molID \n ...)
Please see more details in README.
"""
import argparse
import subprocess
import pickle
import os
from rdkit import Chem
from rdkit.Chem import AllChem
def getArgParser():
    """Build the command-line argument parser for the clustering script."""
    ap = argparse.ArgumentParser("Fast clustering for chemoinformatics")
    # Positional arguments.
    ap.add_argument("input", help="filename of input file")
    ap.add_argument("nclusters", metavar="N", help="the number of clusters")
    # Optional output locations.
    ap.add_argument("--output", default="clustered.tsv",
                    help="filename of output, tab separated format")
    ap.add_argument("--centroid", metavar="CENTROID", default="centroid.tsv",
                    help="filename of centroid information. tab separated format")
    return ap
def smi2fp(molid, smiles):
    """Encode one molecule as a bayon sparse-vector row.

    The row is the molecule id followed by one "\\tFP_<bit>\\t1.0" field per
    set Morgan-fingerprint (radius 2) bit, terminated by a newline.
    """
    mol = Chem.MolFromSmiles(smiles)
    bits = AllChem.GetMorganFingerprintAsBitVect(mol, 2).GetOnBits()
    fields = ["\tFP_{}\t1.0".format(bit) for bit in bits]
    return molid + "".join(fields) + "\n"
if __name__ == "__main__":
    parser = getArgParser()
    args = parser.parse_args()
    # Convert the tab-separated SMILES input into bayon's sparse
    # fingerprint format ("fp.tsv").
    with open(args.input, "r") as inputf:
        with open("fp.tsv", "w") as tempf:
            for line in inputf:
                molid, smiles = line.rstrip().split("\t")
                tempf.write(smi2fp(molid, smiles))
    # NOTE: shell=True with interpolated filenames is acceptable for a
    # trusted local tool, but must not be exposed to untrusted input.
    res = subprocess.call("time bayon -p -c {0.centroid} -n {0.nclusters} fp.tsv > {0.output}".format(args), shell=True)
    # BUGFIX: check the bayon exit status *before* parsing its output.
    # Previously the (possibly missing/empty) result file was parsed first
    # and the error check ran last.
    if res != 0:
        parser.exit("Error running bayon")
    # Parse bayon output: each line is "cluster_id\tmolid\tscore\tmolid\t..."
    # and is reshaped into one "molid\tscore\tCLS_ID_<id>" row per molecule.
    # Files are now managed by context managers (inputf was never closed).
    with open(args.output.split(".")[0] + "_parse.tsv", "w") as parsefile, \
            open(args.output, "r") as resultf:
        for line in resultf:
            fields = line.rstrip().split("\t")
            cluster_id = fields[0]
            for i in range(1, len(fields) - 1, 2):
                molid = fields[i]
                point = fields[i + 1]
                parsefile.write("{}\t{}\tCLS_ID_{}\n".format(molid, point, cluster_id))
|
bp-kelley/rdkit
|
Contrib/Fastcluster/fastcluster.py
|
Python
|
bsd-3-clause
| 2,364
|
[
"RDKit"
] |
f7b91d17de7a41c238b69f5d54c41709118b6da9789a1cb40ac1a35261980e49
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import ast
import json
import demjson
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
from typing import Any
from typing import cast
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import fromstring
from collections import OrderedDict
from ORCA.utils.LogError import LogError, LogErrorSmall
import html
__all__ = ['ToBool',
'ToDic',
'ToFloat',
'ToFloat2',
'ToHex',
'ToHexFromString',
'ToInt',
'ToIntVersion',
'ToList',
'ToOrderedDic',
'ToString',
'ToStringVersion',
'ToUnicode',
'ToBytes',
'UnEscapeUnicode',
'EscapeUnicode',
'DictToUnicode',
'XMLToDic'
]
def ToHex(iInt:int, iLen:int=2) -> str:
    """
    Converts an integer to a hex string of a fixed width.

    :param iInt: Integer value to convert
    :param iLen: Optional: output len, default = 2
    :return: The hex string (last iLen characters, zero padded)
    """
    # Pad with zeros, neutralize the "0x" marker, then keep the last
    # iLen characters.
    uPadded:str = ("0" * iLen + hex(iInt)).replace('x', '0')
    return uPadded[-iLen:]
def ToBytes(uStr:Union[str,bytes]) -> bytes:
    """
    Converts a string to bytes (UTF-8); bytes pass through unchanged.

    :param uStr: input string (or bytes)
    :return: The bytes string
    """
    if isinstance(uStr, bytes):
        return uStr
    try:
        return bytes(uStr, 'utf-8')
    except Exception as e:
        # Fallback: log and hand back the original object unchanged.
        LogError(uMsg=u'ToBytes: Convert error, using fallback', oException=e)
        return cast(bytes, uStr)
def ToHexFromString(uString:str) -> str:
    """
    Converts a string to a colon-separated hex string.

    :param uString: the string to convert to a hex string
    :return: A hex string representation of a string (e.g. "41:42")
    """
    aCodes = ["{:02x}".format(ord(cChar)) for cChar in uString]
    return ":".join(aCodes)
def DictToUnicode(vObj:Union[Dict[Any,Any],List[Any],bool,str,None]) -> str:
    """
    Converts a dict (or list/bool/str/None) to a string, without the u'
    prefix for unicode strings.

    :return: The string representation
    """
    try:
        if isinstance(vObj, dict):
            if len(vObj) == 0:
                return "{}"
            aPairs = [DictToUnicode(k) + ": " + DictToUnicode(v) for k, v in vObj.items()]
            return u'{' + ",".join(aPairs) + u'}'
        elif isinstance(vObj, list):
            if len(vObj) == 0:
                return "[]"
            return "[" + ",".join(DictToUnicode(v) for v in vObj) + "]"
        elif isinstance(vObj, bool):
            # lowercase true/false like JSON
            return ToUnicode(vObj).lower()
        elif isinstance(vObj, str):
            return '"' + vObj + '"'
        elif vObj is None:
            return '"none"'
        else:
            return ToUnicode(vObj)
    except Exception as e:
        LogError(uMsg=u'DictToUnicode: Dictionary convert error, using fallback', oException=e)
        return ToUnicode(str(vObj))
def ToString(uObj:str) -> bytes:
    """
    Converts a unicode string into an ASCII byte string.

    Non-ASCII characters are replaced by XML character references
    (e.g. ``&#228;``), so no information is lost.

    :param uObj: The unicode string to convert
    :return: The encoded byte string (note: despite the name, bytes)
    """
    return uObj.encode('ascii', 'xmlcharrefreplace')
def ToUnicode(Obj:Any) ->str:
    """
    Converts an object into a unicode string.

    :rtype: string
    :param Obj: any object to be converted
    :return: A unicode string of Obj
    """
    # Dicts get the u-prefix-free rendering from DictToUnicode.
    if isinstance(Obj, dict):
        return DictToUnicode(Obj)
    if isinstance(Obj, str):
        return Obj
    try:
        if isinstance(Obj, bytes):
            try:
                return Obj.decode("utf-8")
            except Exception as Ex:
                LogError(uMsg=u'Unicode Transfer Error', oException=Ex)
                # Fallback: silently drop every byte that is not plain ASCII.
                uRet:str = Obj.decode(encoding="ascii", errors="ignore")
                print (uRet)
                return uRet
        else:
            return str(Obj)
    except Exception as e:
        LogError(uMsg=u'Unicode Transfer Error', oException=e)
        # Debug aid: dump the raw object, its type and its code points.
        print ('[', Obj, ']')
        print (type(Obj))
        if isinstance(Obj, str):
            for cChar in Obj:
                print (ord(cChar))
        print ('')
        return str(Obj)
def ToOrderedDic(uString:str) -> OrderedDict:
    """
    Converts a (unicode) string into an ordered dict.

    Parsing is intentionally simple: braces are stripped, pairs are split
    on commas, and each "key: value" pair is split on the first colon;
    keys and values are stripped of blanks, newlines and double quotes.

    :rtype: OrderedDict
    :param uString: The string representation of a dict
    :return: The ordered Dict
    """
    dResult = OrderedDict()
    try:
        for uPair in uString.strip(' \n{}').split(","):
            if ":" not in uPair:
                continue
            aParts = uPair.split(":")
            dResult[aParts[0].strip(' \n"')] = aParts[1].strip(' \n"')
    except Exception as e:
        LogError(uMsg=u'ToOrderedDic: Dictionary Convert error', oException=e)
        LogError(uMsg=uString)
    return dResult
def ToDic(uString:Union[str,Dict]) -> Dict:
    """
    converts a (unicode) string into a dict
    :rtype: dict
    :param uString: The string representation of a dict
    :return: The dict (empty dict on failure)
    """
    """
    This is complex by purpose, as we might face "invalid" dic strings, from devices or by the system
    eg quotes/double quotes
    eg backslashes in windows paths
    So i try different tools
    """
    dRet:Dict[Any,Any]
    uString2:str
    bDoubleBack:bool = False
    # Already a dict: nothing to convert.
    if isinstance(uString, dict):
        return uString
    if uString == u'' or uString == "{}":
        return {}
    # Protect backslashes (e.g. windows paths) from the JSON parsers with a
    # marker; they are restored afterwards by DictUnescaceBackslash.
    if "\\" in uString:
        uString2 = uString.replace("\\", "***BaCkSlAsH***")
        bDoubleBack = True
    else:
        uString2 = cast(str, uString)
    try:
        # Two passes: the second pass retries with single quotes replaced by
        # double quotes (a common source of invalid JSON).
        for i in range(2):
            if i == 1:
                uString2 = uString2.replace("\'", "\"")
            # 1st attempt: strict JSON.
            try:
                dRet = cast(Dict[Any,Any], json.loads(uString2))
                if bDoubleBack:
                    DictUnescaceBackslash(dRet)
                return dRet
            except Exception:
                pass
            # 2nd attempt: tolerant JSON via demjson.
            try:
                dRet = cast(Dict[Any,Any], demjson.decode(uString2))
                if bDoubleBack:
                    DictUnescaceBackslash(dRet)
                return dRet
            except Exception:
                pass
            # 3rd attempt: Python literal syntax.
            try:
                dRet = ast.literal_eval(uString2)
                if bDoubleBack:
                    DictUnescaceBackslash(dRet)
                return dRet
            except Exception:
                pass
        LogError(uMsg=u'ToDic: can\'t convert string to dic:' + uString)
        return {}
    except Exception as e:
        LogErrorSmall(uMsg=u'ToDic: Dictionary Convert error', oException=e)
        LogErrorSmall(uMsg=uString)
        return {}
def DictUnescaceBackslash(oDict:Dict[str,Any]) -> None:
    """
    Restores (in place) the backslashes which ToDic previously replaced by
    the ***BaCkSlAsH*** marker; recurses into nested dicts and lists.

    :param dict oDict:
    """
    try:
        for uKey, vValue in oDict.items():
            if isinstance(vValue, dict):
                DictUnescaceBackslash(vValue)
            elif isinstance(vValue, str):
                oDict[uKey] = vValue.replace("***BaCkSlAsH***", "\\")
            elif isinstance(vValue, list):
                for vElem in vValue:
                    DictUnescaceBackslash(vElem)
    except Exception as e:
        LogError(uMsg=u'DictUnescaceBackslash', oException=e)
def ToList(uString:Union[str,List]) -> List:
    """
    converts a (unicode) string into a list
    Standard format should be "['par1','par2']"
    :param uString: A string representation of a list
    :return: The list
    """
    aRet:List
    uItem:str
    # Already a list: nothing to convert.
    if isinstance(uString, list):
        return uString
    if uString == "" or uString == "[]" or uString == "u[]":
        return []
    # 1st attempt: proper Python literal, e.g. "['a','b']".
    try:
        return ast.literal_eval(uString)
    except:
        pass
    # 2nd attempt: strip the outer brackets and retry.
    # NOTE(review): literal_eval on the bracket-stripped text only succeeds
    # for quoted elements; the intended input format for this branch should
    # be confirmed.
    try:
        if uString.startswith("["):
            return ast.literal_eval(uString[1:-1])
    except:
        pass
    # No separator: single-element list (outer brackets removed if present).
    if not "," in uString:
        if uString.startswith("["):
            return [uString[1:-1]]
        else:
            return [uString]
    # Last resort: naive comma split, elements kept verbatim (unquoted).
    try:
        aRet = []
        for uItem in uString.split(","):
            aRet.append(uItem)
        return aRet
    except Exception as e:
        LogError(uMsg=u'ToList: List Convert error', oException=e)
        LogError(uMsg=uString)
        uRet = [uString]
        return uRet
def ToInt(vValue:Union[str,float]) -> int:
    """
    Converts a (unicode) string or number into an integer;
    returns 0 in case of an error.

    :rtype: int
    :param string|float vValue: The string representation of an integer number
    :return: The integer value
    """
    iResult:int = 0
    try:
        iResult = int(vValue)
    except Exception:
        pass
    return iResult
def ToBool(vValue:Union[str,bool]) -> bool:
    """
    Converts a (unicode) string into a bool.

    :rtype: bool
    :param vValue: The string representation of a boolean value, can be (0/1) or True/False (case independent)
    :return: The boolean value
    """
    # Booleans pass through untouched; anything else is stringified and
    # compared case-insensitively against the truthy spellings.
    if isinstance(vValue, bool):
        return vValue
    return ToUnicode(vValue).lower() in ['true', '1']
def ToFloat(uString:str) -> float:
    """
    Converts a (unicode) string into a float number;
    returns 0.0 in case of an error.

    :rtype: float
    :param uString: The string representation of a float number
    :return: The float value
    """
    fResult:float = 0.0
    try:
        fResult = float(uString)
    except Exception:
        pass
    return fResult
def ToFloat2(uValue:str) -> Tuple[float,bool]:
    """
    Converts a (unicode) string into a float number and reports whether
    the conversion succeeded; (0.0, False) in case of an error.

    :rtype: tuple(float, bool)
    :param string uValue: The string representation of a float number
    :return: A tuple of the float value and a boolean success flag
    """
    try:
        fValue:float = float(uValue)
    except Exception:
        return 0.0, False
    return fValue, True
def ToStringVersion(iVersion:int) -> str:
    """
    Converts an integer representation of a version to a version string.
    Inverse of ToIntVersion: each section occupies three decimal digits.

    :rtype: string
    :param int iVersion:
    :return: A string representation of the version ("major.minor.patch")
    """
    iMajor:int = int(iVersion / 1000000)
    iMinor:int = int((iVersion % 1000000) / 1000)
    iPatch:int = int(iVersion % 1000)
    return "{}.{}.{}".format(iMajor, iMinor, iPatch)
def ToIntVersion(uVersion:str) -> int:
    """
    Converts a version string into an integer version number.
    Maximum 2 dots (3 sections) are significant; each section may hold up
    to 3 digits, e.g. "1.1.10" -> 1001010.

    BUGFIX: sections beyond the third are now ignored — previously a
    string like "1.2.3.4" raised an IndexError on the fixed-size result
    list. Behavior for valid (<= 3 section) input is unchanged.

    :rtype: int
    :param str uVersion: The string representation of a version
    :return: The integer representation of a version
    """
    aResult:List[int] = [0, 0, 0]
    # Only the first three sections are significant.
    for iIndex, uSection in enumerate(uVersion.split('.')[:3]):
        aResult[iIndex] = ToInt(uSection)
    return aResult[0] * 1000000 + aResult[1] * 1000 + aResult[2]
def UnEscapeUnicode(uObj:str) -> str:
    """
    Converts unicode escapes (html escapes, e.g. ``&amp;`` or ``&#228;``)
    back into unicode characters.

    :rtype: str
    :param uObj: The unicode string to unescape unicode (html) escapes
    :return: Unescaped unicode value
    """
    uUnescaped:str = html.unescape(uObj)
    return uUnescaped
def EscapeUnicode(uObj:str) -> str:
    """
    Escapes unicode characters (non ASCII) to html escapes.

    :rtype: str
    :param str uObj: The unicode string to escape unicode values
    :return: Escaped unicode value (should be ASCII conform)
    """
    byEscaped:bytes = uObj.encode('ascii', 'xmlcharrefreplace')
    return ToUnicode(byEscaped)
def XMLToDic(vElement:Union[Element,str]) -> Dict:
    """
    Converts an xml structure into a dict.

    :param vElement: An ElementTree node to convert, or a string representing a xml structure
    :return: The converted dictionary (empty dict on failure)
    """
    try:
        # Accept either an already-parsed node or raw xml text.
        if isinstance(vElement, str):
            oNode:Element = fromstring(cast(str, vElement))
        else:
            oNode = cast(Element, vElement)
        return cXMLToDic(oNode).getDict()
    except Exception as e:
        LogError(uMsg=u'XMLToDic: XML Convert error', oException=e)
        LogError(uMsg=str(vElement))
        return {}
class cXMLToDic(dict):
    """
    Converts an ElementTree xml node into a dictionary.

    The instance itself is the dict of the node's children; use getDict()
    to obtain the conventional {tag: content} wrapper.
    """
    def __init__(self, oParentElement: Element):
        super().__init__()
        # Keep the source element; its tag becomes the key in getDict().
        self.XML_Attributes:Element = oParentElement
        self.addAttributes(self.XML_Attributes, self)
        oChild:Element
        for oChild in list(oParentElement):
            # Normalize missing text to a single blank so .strip() is safe.
            oChild.text = oChild.text if (oChild.text is not None) else ' '
            if len(oChild) == 0:
                # Leaf node: store its stripped text under its tag.
                self.update(self._addToDict(uKey= oChild.tag, oValue = oChild.text.strip(), dDict = self))
                self.addAttributes(oChild, self)
            else:
                # Nested node: recurse and store the sub-dictionary.
                dInnerChild = cXMLToDic(oParentElement=oChild)
                self.update(self._addToDict(uKey=dInnerChild.XML_Attributes.tag, oValue=dInnerChild, dDict=self))
    def getDict(self)->Dict:
        """
        Return the attributes as a dict keyed by the source element's tag.
        """
        return {self.XML_Attributes.tag: self}
    # noinspection PyMethodMayBeStatic
    def addAttributes(self, oNode: Element, dDict:Dict):
        """
        Adds the xml attributes into the Dict tree.

        :param oNode: The xml node to parse the attributes
        :param dDict: The target dict to store the attributes
        """
        for uAttribute in oNode.attrib:
            uValue = oNode.get(uAttribute, default="")
            # Attributes with empty values are skipped.
            if uValue:
                if not "attributes" in dDict:
                    dDict["attributes"] = {}
                dDict["attributes"][uAttribute] = uValue
                # Additionally store the value under an indexed alias
                # ("name[0]", "name[1]", ...) so repeated attribute names
                # from sibling nodes do not overwrite each other; once a
                # second occurrence exists the plain (unindexed) key is
                # dropped.  NOTE(review): intent inferred from the code —
                # confirm against the callers of "attributes".
                for iIndex in range(1000):
                    sTag = uAttribute + "[%s]" % iIndex
                    if not sTag in dDict["attributes"]:
                        dDict["attributes"][sTag] = uValue
                        if iIndex > 0:
                            del dDict["attributes"][uAttribute]
                        break
    class _addToDict(dict):
        # Helper dict: maps uKey to oValue, or — when uKey already exists in
        # dDict — to a list accumulating all values of that repeated tag.
        def __init__(self, uKey: str, oValue, dDict: Dict):
            super().__init__()
            if not uKey in dDict:
                self.update({uKey: oValue})
            else:
                identical = dDict[uKey] if type(dDict[uKey]) == list else [dDict[uKey]]
                self.update({uKey: identical + [oValue]})
|
thica/ORCA-Remote
|
src/ORCA/utils/TypeConvert.py
|
Python
|
gpl-3.0
| 16,124
|
[
"ORCA"
] |
271aa1260fc149d733e4b4358ebef69e2c42a089a156382da2d0c73da5169c8b
|
#!/usr/bin/env python
"""
==================
ModEM
==================
# Generate files for ModEM
# Generate data file for ModEM
# by Paul Soeffky 2013
# revised by LK 2014
# revised by JP 2014
# edited by AK 2016
# revised by JP 2017
# revised by AK 2017 to bring across functionality from ak branch
"""
#==============================================================================
# Imports
#==============================================================================
# general packages
import os
import numpy as np
import scipy.interpolate as spi
import scipy.stats as stats
# mtpy modules
import mtpy.core.z as mtz
import mtpy.core.mt as mt
import mtpy.utils.gis_tools as gis_tools
import mtpy.modeling.ws3dinv as ws
import mtpy.imaging.mtplottools as mtplottools
import mtpy.utils.exceptions as mtex
import mtpy.utils.calculator as mtcc
import mtpy.analysis.pt as mtpt
import mtpy.imaging.mtcolors as mtcl
import mtpy.utils.configfile as mtcfg
import mtpy.utils.filehandling as mtfh
import mtpy.utils.mesh_tools as mtmesh
# Plotting tools
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.patches import Ellipse
from matplotlib.colors import Normalize
import matplotlib.colorbar as mcb
import matplotlib.gridspec as gridspec
import matplotlib.widgets as widgets
import matplotlib.colors as colors
import matplotlib.cm as cm
# vtk tools
try:
from evtk.hl import gridToVTK, pointsToVTK
except ImportError:
print ('If you want to write a vtk file for 3d viewing, you need download '
'and install evtk from https://bitbucket.org/pauloh/pyevtk')
#==============================================================================
class Stations(object):
    """
    station locations class

    Wraps a numpy structured array (``station_locations``) with one row per
    station holding geographic (lat/lon/elev), UTM (east/north/zone) and
    model-relative (rel_east/rel_north) coordinates, and exposes each column
    through read-only properties.

    ..note:: If the survey steps across multiple UTM zones, then a
             distance will be added to the stations to place them in
             the correct location.  This distance is
             _utm_grid_size_north and _utm_grid_size_east.  You should
             use these parameters to place the locations in the proper spot
             as grid distances and overlaps change over the globe.
             **This is not implemented yet**
    """
    def __init__(self, **kwargs):
        # structured dtype: one row per station
        self.dtype = [('station', '|S10'),
                      ('lat', np.float),
                      ('lon', np.float),
                      ('elev', np.float),
                      ('rel_east', np.float),
                      ('rel_north', np.float),
                      ('east', np.float),
                      ('north', np.float),
                      ('zone', 'S4')]
        self.station_locations = np.zeros(0, dtype=self.dtype)
        self.model_epsg = None
        self.model_utm_zone = None
        # only keyword arguments matching existing attributes are applied;
        # unknown keys are silently ignored
        for key in kwargs.keys():
            if hasattr(self,key):
                setattr(self,key,kwargs[key])
    ## --> define properties that can only be returned and not set
    @property
    def lat(self):
        # latitude column (decimal degrees) for all stations
        return self.station_locations['lat']
    @property
    def lon(self):
        # longitude column (decimal degrees)
        return self.station_locations['lon']
    @property
    def east(self):
        # UTM easting column (m)
        return self.station_locations['east']
    @property
    def north(self):
        # UTM northing column (m)
        return self.station_locations['north']
    @property
    def elev(self):
        # elevation column (m)
        return self.station_locations['elev']
    @property
    def rel_east(self):
        # east location relative to the grid center (m)
        return self.station_locations['rel_east']
    @property
    def rel_north(self):
        # north location relative to the grid center (m)
        return self.station_locations['rel_north']
    @property
    def utm_zone(self):
        # UTM zone string per station
        return self.station_locations['zone']
    @property
    def station(self):
        # station name per station
        return self.station_locations['station']
    def _get_mt_objs_from_list(self, input_list):
        """
        get mt_objects from a list of files or mt_objects

        Accepts either a list of mtpy.core.mt.MT objects (returned as-is) or
        a list of .edi file paths (each loaded into an MT object).

        :raises ValueError: if input_list is not a list or ndarray
        :raises ModEMError: if the first file path does not end in .edi
        """
        if type(input_list) not in [list, np.ndarray]:
            raise ValueError('Input list needs to be type list, not {0}'.format(type(input_list)))
        if type(input_list[0]) is mt.MT:
            return input_list
        # NOTE(review): only the first element is inspected; falls through
        # and returns None for any other element type -- confirm intended
        if type(input_list[0]) is str:
            if input_list[0].endswith('.edi'):
                return [mt.MT(fn) for fn in input_list]
            else:
                raise ModEMError('file {0} not supported yet'.format(input_list[0][-4:]))
    def get_station_locations(self, input_list):
        """
        get station locations from a list of edi files
        Arguments
        -------------
            **input_list** : list
                             list of edi file names, or mt_objects
        Returns
        ------------
            * fills station_locations array
        """
        mt_obj_list = self._get_mt_objs_from_list(input_list)
        #if station locations are not input read from the edi files
        if mt_obj_list is None:
            raise AttributeError('mt_obj_list is None, need to input a list of '
                                 'mt objects to read in.')
        n_stations = len(mt_obj_list)
        if n_stations == 0:
            raise ModEMError('No .edi files in edi_list, please check '
                             'file locations.')
        #make a structured array to put station location information into
        self.station_locations = np.zeros(n_stations,
                                          dtype=self.dtype)
        #get station locations in meters
        for ii, mt_obj in enumerate(mt_obj_list):
            self.station_locations[ii]['lat'] = mt_obj.lat
            self.station_locations[ii]['lon'] = mt_obj.lon
            self.station_locations[ii]['station'] = mt_obj.station
            self.station_locations[ii]['elev'] = mt_obj.elev
            # if a model projection was given, project to UTM ourselves;
            # otherwise trust the coordinates already on the MT object
            if ((self.model_epsg is not None) or (self.model_utm_zone is not None)):
                east,north,utm_zone = gis_tools.project_point_ll2utm(mt_obj.lat,
                                                                     mt_obj.lon,
                                                                     utm_zone=self.model_utm_zone,
                                                                     epsg=self.model_epsg)
                self.station_locations[ii]['east'] = east
                self.station_locations[ii]['north'] = north
                self.station_locations[ii]['zone'] = utm_zone
            else:
                self.station_locations[ii]['east'] = mt_obj.east
                self.station_locations[ii]['north'] = mt_obj.north
                self.station_locations[ii]['zone'] = mt_obj.utm_zone
        # get relative station locations
        self.calculate_rel_locations()
    def calculate_rel_locations(self, shift_east=0, shift_north=0):
        """
        put station in a coordinate system relative to
        (shift_east, shift_north)
        (+) shift right or up
        (-) shift left or down

        NOTE(review): shift_east/shift_north are currently unused -- the code
        that applied them is commented out below. Confirm whether the
        parameters should still be honoured.
        """
        #
        # #remove the average distance to get coordinates in a relative space
        # self.station_locations['rel_east'] = self.east-self.east.mean()
        # self.station_locations['rel_north'] = self.north-self.north.mean()
        #
        # #translate the stations so they are relative to 0,0
        # east_center = (self.rel_east.max()-np.abs(self.rel_east.min()))/2.
        # north_center = (self.rel_north.max()-np.abs(self.rel_north.min()))/2.
        #
        #
        # #remove the average distance to get coordinates in a relative space
        # self.station_locations['rel_east'] -= east_center+shift_east
        # self.station_locations['rel_north'] -= north_center+shift_north
        #translate the stations so they are relative to 0,0
        # center is the midpoint of the station bounding box
        east_center = (self.east.max()+self.east.min())/2.
        north_center = (self.north.max()+self.north.min())/2.
        self.station_locations['rel_east'] = self.east - east_center
        self.station_locations['rel_north'] = self.north - north_center
    # make center point a get method, can't set it.
    @property
    def center_point(self):
        """
        calculate the center point from the given station locations
        Returns
        -------------
            **center_location** : np.ndarray
                                  structured array of length 1
                                  dtype includes (east, north, zone, lat, lon)
        """
        center_location = np.recarray(1, dtype=self.dtype)
        # AK - using the mean here but in get_relative_locations used (max + min)/2, why???
        # center_point = np.array([self.east.mean(), self.north.mean()])
        #
        # #translate the stations so they are relative to 0,0
        # east_center = (self.rel_east.max()-np.abs(self.rel_east.min()))/2
        # north_center = (self.rel_north.max()-np.abs(self.rel_north.min()))/2
        #
        # center_point[0] -= east_center
        # center_point[1] -= north_center
        #
        # # calculate center point in lat, lon, easting, northing
        # center_location['east'] = center_point[0]
        # center_location['north'] = center_point[1]
        # midpoint of the bounding box, consistent with
        # calculate_rel_locations above
        center_point = np.array([self.east.max() + self.east.min(),
                                 self.north.max() + self.north.min()])/2.
        center_location['east'] = center_point[0]
        center_location['north'] = center_point[1]
        # zone of the first station is assumed representative
        center_location['zone'] = self.utm_zone[0]
        center_ll = gis_tools.project_point_utm2ll(float(center_point[0]),
                                                   float(center_point[1]),
                                                   self.utm_zone[0],
                                                   epsg=self.model_epsg)
        center_location['lat'] = center_ll[0]
        center_location['lon'] = center_ll[1]
        return center_location
    def rotate_stations(self, rotation_angle):
        """
        Rotate stations assuming N is 0
        Arguments
        -------------
            **rotation_angle** : float
                                 angle in degrees assuming N is 0
        Returns
        -------------
            * refils rel_east and rel_north in station_locations.  Does this
              because you will still need the original locations for plotting
              later.
        """
        cos_ang = np.cos(np.deg2rad(rotation_angle))
        sin_ang = np.sin(np.deg2rad(rotation_angle))
        rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
                                         [-sin_ang, cos_ang]]))
        coords = np.array([self.station_locations['rel_east'],
                           self.station_locations['rel_north']])
        #rotate the relative station locations
        new_coords = np.array(np.dot(rot_matrix, coords))
        self.station_locations['rel_east'][:] = new_coords[0, :]
        self.station_locations['rel_north'][:] = new_coords[1, :]
        print 'Rotated stations by {0:.1f} deg clockwise from N'.format(
            rotation_angle)
    def check_utm_crossing(self):
        """
        If the stations cross utm zones, then estimate distance by computing
        distance on a sphere.

        NOTE(review): not implemented -- only a commented-out sketch exists.
        """
        #
        # latMid = (Lat1+Lat2 )/2.0;  // or just use Lat1 for slightly less accurate estimate
        #
        #
        # m_per_deg_lat = 111132.954 - 559.822 * cos( 2.0 * latMid ) + 1.175 * cos( 4.0 * latMid);
        # m_per_deg_lon = (3.14159265359/180 ) * 6367449 * cos ( latMid );
        #
        # deltaLat = fabs(Lat1 - Lat2);
        # deltaLon = fabs(Lon1 - Lon2);
        #
        # dist_m = sqrt (  pow( deltaLat * m_per_deg_lat,2) + pow( deltaLon * m_per_deg_lon , 2) );
        #
        pass
class Data(object):
"""
Data will read and write .dat files for ModEM and convert a WS data file
to ModEM format.
..note: :: the data is interpolated onto the given periods such that all
stations invert for the same periods. The interpolation is
a linear interpolation of each of the real and imaginary parts
of the impedance tensor and induction tensor.
See mtpy.core.mt.MT.interpolate for more details
Arguments
------------
**edi_list** : list
list of full paths to .edi files you want to invert for
====================== ====================================================
Attributes/Key Words Description
====================== ====================================================
_dtype internal variable defining the data type of
data_array
_t_shape internal variable defining shape of tipper array in
_dtype
_z_shape internal variable defining shape of Z array in
_dtype
center_position (east, north, evel) for center point of station
array. All stations are relative to this location
for plotting purposes.
comp_index_dict dictionary for index values of component of Z and T
station_locations Stations object
data_array numpy.ndarray (num_stations) structured to store
data. keys are:
* station --> station name
* lat --> latitude in decimal degrees
* lon --> longitude in decimal degrees
* elev --> elevation (m)
* rel_east -- > relative east location to
center_position (m)
* rel_north --> relative north location to
center_position (m)
* east --> UTM east (m)
* north --> UTM north (m)
* zone --> UTM zone
* z --> impedance tensor array with shape
(num_freq, 2, 2)
* z_err --> impedance tensor error array with
shape (num_freq, 2, 2)
* tip --> Tipper array with shape
(num_freq, 1, 2)
* tipperr --> Tipper array with shape
(num_freq, 1, 2)
data_fn full path to data file
data_period_list period list from all the data
edi_list list of full paths to edi files
error_type_tipper [ 'abs' | 'floor' ]
*default* is 'abs'
error_type_z [ 'egbert' | 'mean_od' | 'eigen' ]
*default* is 'egbert_floor'
* add '_floor' to any of the above to set the
error as an error floor, otherwise all
components are give weighted the same
* 'egbert' sets error to
error_value_z * sqrt(abs(zxy*zyx))
* 'mean_od' sets error to
error_value_z * mean([Zxy, Zyx])
* 'eigen' sets error to
error_value_z * eigenvalues(Z[ii])
error_value_z percentage to multiply Z by to set error
*default* is 5 for 5% of Z as error
error_value_tipper absolute error between 0 and 1.
fn_basename basename of data file. *default* is 'ModEM_Data.dat'
header_strings strings for header of data file following the format
outlined in the ModEM documentation
inv_comp_dict dictionary of inversion componets
inv_mode inversion mode, options are: *default* is '1'
* '1' --> for 'Full_Impedance' and
'Full_Vertical_Components'
* '2' --> 'Full_Impedance'
* '3' --> 'Off_Diagonal_Impedance' and
'Full_Vertical_Components'
* '4' --> 'Off_Diagonal_Impedance'
* '5' --> 'Full_Vertical_Components'
* '6' --> 'Full_Interstation_TF'
* '7' --> 'Off_Diagonal_Rho_Phase'
inv_mode_dict dictionary for inversion modes
max_num_periods maximum number of periods
model_epsg epsg code for model projection, provide this to
project model to non-utm coordinates. Find the epsg
code for your projection on
http://spatialreference.org/ref/ or google search
epsg "your projection"
model_utm_zone alternative to model_epsg, choose a utm zone to
project all sites to (e.g. '55S')
mt_dict dictionary of mtpy.core.mt.MT objects with keys
being station names
period_dict dictionary of period index for period_list
period_list list of periods to invert for
period_max maximum value of period to invert for
period_min minimum value of period to invert for
rotate_angle Angle to rotate data to assuming 0 is N and E is 90
save_path path to save data file to
units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z
*default* is [mV/km]/[nT]
wave_sign_impedance [ + | - ] sign of time dependent wave.
*default* is '+' as positive downwards.
wave_sign_tipper [ + | - ] sign of time dependent wave.
*default* is '+' as positive downwards.
====================== ====================================================
========================== ================================================
Methods Description
========================== ================================================
convert_ws3dinv_data_file convert a ws3dinv file to ModEM fomrat,
**Note** this doesn't include tipper data and
you need a station location file like the one
output by mtpy.modeling.ws3dinv
get_data_from_edi get data from given .edi files and fill
attributes accordingly
get_mt_dict get a dictionary of mtpy.core.mt.MT objects
with keys being station names
get_period_list get a list of periods to invert for
get_station_locations get station locations and relative locations
filling in station_locations
read_data_file read in a ModEM data file and fill attributes
data_array, station_locations, period_list, mt_dict
write_data_file write a ModEM data file
========================== ================================================
:Example 1 --> create inversion period list: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
        >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\
max_num_periods=12)
>>> md.write_data_file(save_path=r"/home/modem/inv1")
:Example 2 --> set inverions period list from data: ::
>>> import os
>>> import mtpy.modeling.modem as modem
>>> edi_path = r"/home/mt/edi_files"
>>> edi_list = [os.path.join(edi_path, edi) \
for edi in os.listdir(edi_path)\
if edi.find('.edi') > 0]
        >>> md = modem.Data(edi_list)
>>> #get period list from an .edi file
>>> mt_obj1 = modem.mt.MT(edi_list[0])
>>> inv_period_list = 1./mt_obj1.Z.freq
>>> #invert for every third period in inv_period_list
>>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))]
>>> md.period_list = inv_period_list
>>> md.write_data_file(save_path=r"/home/modem/inv1")
:Example 3 --> change error values: ::
        >>> import mtpy.modeling.modem as modem
        >>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.error_type = 'floor'
>>> mdr.error_floor = 10
>>> mdr.error_tipper = .03
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 4 --> change inversion type: ::
        >>> import mtpy.modeling.modem as modem
        >>> mdr = modem.Data()
>>> mdr.read_data_file(r"/home/modem/inv1/ModEM_Data.dat")
>>> mdr.inv_mode = '3'
>>> mdr.write_data_file(save_path=r"/home/modem/inv2")
:Example 5 --> rotate data: ::
>>> md.rotation_angle = 60
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
>>> # or
>>> md.write_data_file(save_path=r"/home/modem/Inv1", \
rotation_angle=60)
"""
    def __init__(self, edi_list=None, **kwargs):
        # list of .edi file paths (or None; can be set later)
        self.edi_list = edi_list
        # error scheme for impedance (percentage) and tipper (absolute)
        self.error_type_z = 'egbert_floor'
        self.error_value_z = 5.0
        self.error_value_tipper = .05
        self.error_type_tipper = 'abs'
        # sign convention of the time-dependent wave exp(+/- i omega t)
        self.wave_sign_impedance = '+'
        self.wave_sign_tipper = '+'
        self.units = '[mV/km]/[nT]'
        self.inv_mode = '1'
        # period selection controls
        self.period_list = None
        self.period_step = 1
        self.period_min = None
        self.period_max = None
        self.period_buffer = None
        self.max_num_periods = None
        self.data_period_list = None
        # output file name and location
        self.data_fn = 'ModEM_Data.dat'
        self.save_path = os.getcwd()
        self.formatting = '1'
        self._rotation_angle = 0.0
        self._set_rotation_angle(self._rotation_angle)
        self.center_point = None
        self.data_array = None
        self.mt_dict = None
        # model projection: either a utm zone string or an epsg code
        self.model_utm_zone = None
        self.model_epsg = None
        # per-station impedance and tipper block shapes (nf, 2, 2)/(nf, 1, 2);
        # reset by _set_dtype once the number of periods is known
        self._z_shape = (1, 2, 2)
        self._t_shape = (1, 1, 2)
        self._dtype = [('station', '|S10'),
                       ('lat', np.float),
                       ('lon', np.float),
                       ('elev', np.float),
                       ('rel_east', np.float),
                       ('rel_north', np.float),
                       ('east', np.float),
                       ('north', np.float),
                       ('zone', '|S4'),
                       ('z', (np.complex, self._z_shape)),
                       ('z_err', (np.float, self._z_shape)),
                       ('z_inv_err', (np.float, self._z_shape)),
                       ('tip', (np.complex, self._t_shape)),
                       ('tip_err', (np.float, self._t_shape)),
                       ('tip_inv_err', (np.float, self._t_shape))]
        # inversion mode -> list of ModEM data blocks to write
        self.inv_mode_dict = {'1':['Full_Impedance', 'Full_Vertical_Components'],
                              '2':['Full_Impedance'],
                              '3':['Off_Diagonal_Impedance',
                                   'Full_Vertical_Components'],
                              '4':['Off_Diagonal_Impedance'],
                              '5':['Full_Vertical_Components'],
                              '6':['Full_Interstation_TF'],
                              '7':['Off_Diagonal_Rho_Phase']}
        # data block -> component labels written for it
        self.inv_comp_dict = {'Full_Impedance':['zxx', 'zxy', 'zyx', 'zyy'],
                              'Off_Diagonal_Impedance':['zxy', 'zyx'],
                              'Full_Vertical_Components':['tx', 'ty']}
        # component label -> (row, col) index into the Z / tipper matrix
        self.comp_index_dict = {'zxx': (0, 0), 'zxy':(0, 1), 'zyx':(1, 0),
                                'zyy':(1, 1), 'tx':(0, 0), 'ty':(0, 1)}
        self.header_string = ' '.join(['# Period(s)',
                                       'Code',
                                       'GG_Lat',
                                       'GG_Lon',
                                       'X(m)',
                                       'Y(m)',
                                       'Z(m)',
                                       'Component',
                                       'Real',
                                       'Imag',
                                       'Error\n'])
        # any keyword argument is set verbatim as an attribute (no filtering)
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])
def _set_dtype(self, z_shape, t_shape):
"""
reset dtype
"""
self._z_shape = z_shape
self._t_shape = t_shape
self._dtype = [('station', '|S10'),
('lat', np.float),
('lon', np.float),
('elev', np.float),
('rel_east', np.float),
('rel_north', np.float),
('east', np.float),
('north', np.float),
('zone', '|S4'),
('z', (np.complex, self._z_shape)),
('z_err', (np.float, self._z_shape)),
('z_inv_err', (np.float, self._z_shape)),
('tip', (np.complex, self._t_shape)),
('tip_err', (np.float, self._t_shape)),
('tip_inv_err', (np.float, self._t_shape))]
def get_header_string(self, error_type, error_value, rotation_angle):
"""
reset the header sring for file
"""
h_str = ','.join(['# Created using MTpy calculated {0} error of {1:.0f}%',
' data rotated {2:.1f}_deg clockwise from N\n'])
return h_str.format(error_type, error_value, rotation_angle)
def get_mt_dict(self):
"""
get mt_dict from edi file list
"""
if self.edi_list is None:
raise ModEMError('edi_list is None, please input a list of '
'.edi files containing the full path')
if len(self.edi_list) == 0:
raise ModEMError('edi_list is empty, please input a list of '
'.edi files containing the full path' )
self.mt_dict = {}
for edi in self.edi_list:
mt_obj = mt.MT(edi)
self.mt_dict[mt_obj.station] = mt_obj
    def get_relative_station_locations(self):
        """
        Build a Stations object from self.mt_dict (sorted by station name),
        rotate it if a rotation angle is set, and copy all location columns
        into self.data_array. Also caches the survey center point.
        """
        stations_obj = Stations(model_epsg=self.model_epsg,
                                model_utm_zone=self.model_utm_zone)
        mt_list = [self.mt_dict[s_key] for s_key in sorted(self.mt_dict.keys())]
        stations_obj.get_station_locations(mt_list)
        # rotate locations if needed
        if self._rotation_angle != 0:
            stations_obj.rotate_stations(self._rotation_angle)
        # fill data array; rows are aligned because both this method and
        # fill_data_array iterate stations in sorted order
        self.data_array[:]['station'] = stations_obj.station
        self.data_array[:]['lat'] = stations_obj.lat
        self.data_array[:]['lon'] = stations_obj.lon
        self.data_array[:]['east'] = stations_obj.east
        self.data_array[:]['north'] = stations_obj.north
        self.data_array[:]['elev'] = stations_obj.elev
        self.data_array[:]['rel_east'] = stations_obj.rel_east
        self.data_array[:]['rel_north'] = stations_obj.rel_north
        self.data_array[:]['zone'] = stations_obj.utm_zone
        # get center point
        self.center_point = stations_obj.center_point
    def get_period_list(self):
        """
        Make the period list to invert for.

        If self.period_list is already set it is kept (and echoed).
        Otherwise, all periods present in the data are collected and a
        log-spaced list of max_num_periods values between period_min and
        period_max (snapped to the nearest data periods) is generated.

        :raises ModEMError: if only one of period_min/period_max is given,
                            if max_num_periods is missing, or if no period
                            list could be constructed
        """
        if self.mt_dict is None:
            self.get_mt_dict()
        # a user-supplied period list wins; just report it
        if self.period_list is not None:
            print '-'*50
            print 'Inverting for periods:'
            for per in self.period_list:
                print ' {0:<12.6f}'.format(per)
            print '-'*50
            return
        # collect the union of periods present in all stations
        data_period_list = []
        for s_key in sorted(self.mt_dict.keys()):
            mt_obj = self.mt_dict[s_key]
            data_period_list.extend(list(1./mt_obj.Z.freq))
        self.data_period_list = np.array(sorted(list(set(data_period_list)),
                                                reverse=False))
        if self.period_min is not None:
            if self.period_max is None:
                raise ModEMError('Need to input period_max')
        if self.period_max is not None:
            if self.period_min is None:
                raise ModEMError('Need to input period_min')
        if self.period_min is not None and self.period_max is not None:
            if self.max_num_periods is None:
                raise ModEMError('Need to input number of periods to use')
            # snap the requested bounds to the nearest data periods, then
            # sample log-uniformly between them
            min_index = np.where(self.data_period_list >= self.period_min)[0][0]
            max_index = np.where(self.data_period_list <= self.period_max)[0][-1]
            pmin = np.log10(self.data_period_list[min_index])
            pmax = np.log10(self.data_period_list[max_index])
            self.period_list = np.logspace(pmin, pmax, num=self.max_num_periods)
            print '-'*50
            print 'Inverting for periods:'
            for per in self.period_list:
                print ' {0:<12.6f}'.format(per)
            print '-'*50
        if self.period_list is None:
            raise ModEMError('Need to input period_min, period_max, '
                             'max_num_periods or a period_list')
    def _set_rotation_angle(self, rotation_angle):
        """
        On setting the rotation angle, rotate every MT object's Z and Tipper
        and refill the data array.

        NOTE(review): the logic below looks like two merged implementations.
        _rotation_angle is first set to the delta (-old + new) and then,
        after the early return check, overwritten with the absolute value;
        the "Changing rotation angle" message is also printed twice. Confirm
        whether the delta or the absolute angle should be applied.
        """
        # no-op when the angle is unchanged
        if self._rotation_angle == rotation_angle:
            return
        print 'Changing rotation angle from {0:.1f} to {1:.1f}'.format(
            self._rotation_angle, rotation_angle)
        self._rotation_angle = -self._rotation_angle+rotation_angle
        if self.rotation_angle == 0:
            return
        print 'Changing rotation angle from {0:.1f} to {1:.1f}'.format(
            self._rotation_angle, rotation_angle)
        self._rotation_angle = rotation_angle
        if self.mt_dict is None:
            self.get_mt_dict()
        # rotate the underlying impedance and tipper of every station
        for mt_key in sorted(self.mt_dict.keys()):
            mt_obj = self.mt_dict[mt_key]
            mt_obj.Z.rotate(self._rotation_angle)
            mt_obj.Tipper.rotate(self._rotation_angle)
        print 'Data rotated to align with {0:.1f} deg clockwise from N'.format(
            self._rotation_angle)
        self.fill_data_array()
    def _get_rotation_angle(self):
        # current rotation angle in degrees clockwise from N
        return self._rotation_angle
    rotation_angle = property(fget=_get_rotation_angle,
                              fset=_set_rotation_angle,
                              doc="""Rotate data assuming N=0, E=90""")
def fill_data_array(self):
"""
fill the data array from mt_dict
"""
if self.period_list is None:
self.get_period_list()
ns = len(self.mt_dict.keys())
nf = len(self.period_list)
d_array = False
if self.data_array is not None:
d_arr_copy = self.data_array.copy()
d_array = True
self._set_dtype((nf, 2, 2), (nf, 1, 2))
self.data_array = np.zeros(ns, dtype=self._dtype)
rel_distance = True
for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
mt_obj = self.mt_dict[s_key]
if d_array is True:
try:
d_index = np.where(d_arr_copy['station'] == s_key)[0][0]
self.data_array[ii]['station'] = s_key
self.data_array[ii]['lat'] = d_arr_copy[d_index]['lat']
self.data_array[ii]['lon'] = d_arr_copy[d_index]['lon']
self.data_array[ii]['east'] = d_arr_copy[d_index]['east']
self.data_array[ii]['north'] = d_arr_copy[d_index]['north']
self.data_array[ii]['elev'] = d_arr_copy[d_index]['elev']
self.data_array[ii]['rel_east'] = d_arr_copy[d_index]['rel_east']
self.data_array[ii]['rel_north'] = d_arr_copy[d_index]['rel_north']
self.data_array[:]['zone'] = d_arr_copy[d_index]['zone']
except IndexError:
print 'Could not find {0} in data_array'.format(s_key)
else:
self.data_array[ii]['station'] = mt_obj.station
self.data_array[ii]['lat'] = mt_obj.lat
self.data_array[ii]['lon'] = mt_obj.lon
self.data_array[ii]['east'] = mt_obj.east
self.data_array[ii]['north'] = mt_obj.north
self.data_array[ii]['elev'] = mt_obj.elev
try:
self.data_array[ii]['rel_east'] = mt_obj.grid_east
self.data_array[ii]['rel_north'] = mt_obj.grid_north
rel_distance = False
except AttributeError:
pass
# interpolate each station onto the period list
# check bounds of period list
interp_periods = self.period_list[np.where(
(self.period_list >= 1./mt_obj.Z.freq.max()) &
(self.period_list <= 1./mt_obj.Z.freq.min()))]
interp_z, interp_t = mt_obj.interpolate(1./interp_periods)
for kk, ff in enumerate(interp_periods):
jj = np.where(self.period_list == ff)[0][0]
self.data_array[ii]['z'][jj] = interp_z.z[kk, :, :]
self.data_array[ii]['z_err'][jj] = interp_z.z_err[kk, :, :]
if mt_obj.Tipper.tipper is not None:
self.data_array[ii]['tip'][jj] = interp_t.tipper[kk, :, :]
self.data_array[ii]['tip_err'][jj] = \
interp_t.tipper_err[kk, :, :]
if rel_distance is False:
self.get_relative_station_locations()
    def _set_station_locations(self, station_obj=None):
        """
        Take a Stations object and populate the location columns of
        self.data_array.

        If data_array does not exist yet it is created row-for-row from the
        station array; otherwise rows are matched by station name.

        NOTE(review): if called with station_obj=None, station_locations is
        never bound and the code below raises NameError -- confirm callers
        always pass a Stations object.
        """
        if station_obj is not None:
            station_locations = station_obj.station_locations
        if self.data_array is None:
            # first fill: size the dtype from the current period list and
            # copy rows positionally
            self._set_dtype((len(self.period_list), 2, 2),
                            (len(self.period_list), 1, 2))
            self.data_array = np.zeros(station_locations.size,
                                       dtype=self._dtype)
            for d_index, s_arr in enumerate(station_locations):
                self.data_array[d_index]['lat'] = s_arr['lat']
                self.data_array[d_index]['lon'] = s_arr['lon']
                self.data_array[d_index]['east'] = s_arr['east']
                self.data_array[d_index]['north'] = s_arr['north']
                self.data_array[d_index]['elev'] = s_arr['elev']
                self.data_array[d_index]['rel_east'] = s_arr['rel_east']
                self.data_array[d_index]['rel_north'] = s_arr['rel_north']
        else:
            # update in place, matching rows by station name
            for s_arr in station_locations:
                try:
                    d_index = np.where(self.data_array['station'] ==
                                       s_arr['station'])[0][0]
                except IndexError:
                    print 'Could not find {0} in data_array'.format(s_arr['station'])
                    d_index = None
                if d_index is not None:
                    self.data_array[d_index]['lat'] = s_arr['lat']
                    self.data_array[d_index]['lon'] = s_arr['lon']
                    self.data_array[d_index]['east'] = s_arr['east']
                    self.data_array[d_index]['north'] = s_arr['north']
                    self.data_array[d_index]['elev'] = s_arr['elev']
                    self.data_array[d_index]['rel_east'] = s_arr['rel_east']
                    self.data_array[d_index]['rel_north'] = s_arr['rel_north']
    def _get_station_locations(self):
        """
        Extract station locations from data_array into a Stations object.

        :return: Stations object backed by a field-subset view of
                 self.data_array, or None if no data_array exists yet
        """
        if self.data_array is None:
            return None
        # multi-field selection: only the location columns, no Z/tipper data
        station_locations = self.data_array[['station', 'lat', 'lon',
                                             'north', 'east', 'elev',
                                             'rel_north', 'rel_east','zone']]
        station_obj = Stations(model_epsg=self.model_epsg,
                               model_utm_zone=self.model_utm_zone)
        station_obj.station_locations = station_locations
        return station_obj
    station_locations = property(_get_station_locations,
                                 _set_station_locations,
                                 doc="""location of stations""")
def compute_inv_error(self):
"""
compute the error from the given parameters
"""
# copy values over to inversion error
self.data_array['z_inv_err'] = self.data_array['z_err']
self.data_array['tip_inv_err'] = self.data_array['tip_err']
#compute relative error for tipper
if 'floor' in self.error_type_tipper:
t_index = np.where(self.data_array['tip_err'] < self.error_value_tipper)
self.data_array['tip_inv_err'][t_index] = self.error_value_tipper
elif 'abs' in self.error_type_tipper:
self.data_array['tip_inv_err'][:] = self.error_value_tipper
# compute error for z
err_value = self.error_value_z/100.
for ss in range(self.data_array.shape[0]):
for ff in range(max([self._t_shape[0], self._z_shape[0]])):
d_xx = abs(self.data_array['z'][ss, ff, 0, 0])
d_xy = abs(self.data_array['z'][ss, ff, 0, 1])
d_yx = abs(self.data_array['z'][ss, ff, 1, 0])
d_yy = abs(self.data_array['z'][ss, ff, 1, 1])
d = np.array([d_xx, d_xy, d_yx, d_yy])
nz = np.nonzero(d)
if d.sum() == 0.0:
continue
if 'egbert' in self.error_type_z:
if d_xy == 0.0:
d_xy = 1.0
if d_yx == 0.0:
d_yx = 1.0
err = err_value*np.sqrt(d_xy*d_yx)
if err == 1.0:
err = max([d_xx, d_xy, d_yx, d_yy])*10
elif 'median' in self.error_type_z:
err = err_value*np.median(d[nz])
elif 'mean_od' in self.error_type_z:
d = np.array(d_xy, d_yx)
nz = np.nonzero(d)
err = err_value*np.mean(d[nz])
elif 'eigen' in self.error_type_z:
d = d.reshape((2, 2))
err = err_value*np.abs(np.linalg.eigvals(d)).mean()
if err == 0:
err = err_value*d.flatten()[nz].mean()
else:
raise NameError('{0} not understood'.format(self.error_type_z))
self.data_array['z_inv_err'][ss, ff, :, :] = err
# if there is an error floor
if 'floor' in self.error_type_z:
f_index = np.where(self.data_array['z_inv_err'] < self.data_array['z_err'])
self.data_array['z_inv_err'][f_index] = self.data_array['z_err'][f_index]
    def write_data_file(self, save_path=None, fn_basename=None,
                        rotation_angle=None, compute_error=True, fill=True,
                        elevation=False):
        """
        write data file for ModEM
        will save file as save_path/fn_basename
        Arguments:
        ------------
            **save_path** : string
                            directory path to save data file to.
                            *default* is cwd
            **fn_basename** : string
                              basename to save data file as
                              *default* is 'ModEM_Data.dat'
            **rotation_angle** : float
                                 angle to rotate the data by assuming N = 0,
                                 E = 90. *default* is 0.0
            **compute_error** : bool
                                recompute inversion errors before writing.
                                *default* is True
            **fill** : bool
                       refill data_array from mt_dict before writing.
                       *default* is True
            **elevation** : bool
                            write station elevations; if False all
                            elevations are written as 0.0
        Outputs:
        ----------
            **data_fn** : string
                          full path to created data file
        :Example: ::
            >>> import os
            >>> import mtpy.modeling.modem as modem
            >>> edi_path = r"/home/mt/edi_files"
            >>> edi_list = [os.path.join(edi_path, edi) \
                            for edi in os.listdir(edi_path)\
                            if edi.find('.edi') > 0]
            >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\
                                max_num_periods=12)
            >>> md.write_data_file(save_path=r"/home/modem/inv1")
        """
        if save_path is not None:
            self.save_path = save_path
        if fn_basename is not None:
            self.data_fn = fn_basename
        self.data_fn = os.path.join(self.save_path, self.data_fn)
        self.get_period_list()
        #rotate data if desired
        if rotation_angle is not None:
            self.rotation_angle = rotation_angle
        #be sure to fill in data array
        if fill:
            self.fill_data_array()
        # get relative station locations in grid coordinates
        self.get_relative_station_locations()
        if elevation is False:
            self.data_array['elev'][:] = 0.0
        dlines = []
        # one header + data section per inversion component block
        for inv_mode in self.inv_mode_dict[self.inv_mode]:
            if 'impedance' in inv_mode.lower():
                dlines.append(self.get_header_string(self.error_type_z,
                                                     self.error_value_z,
                                                     self.rotation_angle))
                # count stations/periods that actually carry impedance data
                nsta = len(np.nonzero(np.abs(self.data_array['z']).sum(axis=(1,2,3)))[0])
                nper = len(np.nonzero(np.abs(self.data_array['z']).sum(axis=(0,2,3)))[0])
            elif 'vertical' in inv_mode.lower():
                dlines.append(self.get_header_string(self.error_type_tipper,
                                                     self.error_value_tipper,
                                                     self.rotation_angle))
                nsta = len(np.nonzero(np.abs(self.data_array['tip']).sum(axis=(1,2,3)))[0])
                nper = len(np.nonzero(np.abs(self.data_array['tip']).sum(axis=(0,2,3)))[0])
            dlines.append(self.header_string)
            dlines.append('> {0}\n'.format(inv_mode))
            if inv_mode.find('Impedance') > 0:
                dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_impedance))
                dlines.append('> {0}\n'.format(self.units))
            elif inv_mode.find('Vertical') >= 0:
                dlines.append('> exp({0}i\omega t)\n'.format(self.wave_sign_tipper))
                dlines.append('> []\n')
            dlines.append('> 0\n') #orientation, need to add at some point
            dlines.append('> {0:>10.6f} {1:>10.6f}\n'.format(
                self.center_point.lat[0], self.center_point.lon[0]))
            dlines.append('> {0} {1}\n'.format(nper,nsta))
            if compute_error == True:
                self.compute_inv_error()
            for ss in range(self.data_array['z'].shape[0]):
                for ff in range(self.data_array['z'].shape[1]):
                    for comp in self.inv_comp_dict[inv_mode]:
                        #index values for component within the matrix
                        z_ii, z_jj = self.comp_index_dict[comp]
                        #get the correct key for data array according to comp
                        if comp.find('z') == 0:
                            c_key = 'z'
                        elif comp.find('t') == 0:
                            c_key = 'tip'
                        #get the value for that component at that frequency
                        zz = self.data_array[ss][c_key][ff, z_ii, z_jj]
                        # skip empty (0) and masked (1e32) values
                        if zz.real != 0.0 and zz.imag != 0.0 and \
                           zz.real != 1e32 and zz.imag != 1e32:
                            if self.formatting == '1':
                                per = '{0:<12.5e}'.format(self.period_list[ff])
                                sta = '{0:>7}'.format(self.data_array[ss]['station'])
                                lat = '{0:> 9.3f}'.format(self.data_array[ss]['lat'])
                                lon = '{0:> 9.3f}'.format(self.data_array[ss]['lon'])
                                eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
                                nor = '{0:> 12.3f}'.format(self.data_array[ss]['rel_north'])
                                ele = '{0:> 12.3f}'.format(self.data_array[ss]['elev'])
                                com = '{0:>4}'.format(comp.upper())
                                if self.units == 'ohm':
                                    # convert [mV/km]/[nT] to ohm
                                    rea = '{0:> 14.6e}'.format(zz.real/796.)
                                    ima = '{0:> 14.6e}'.format(zz.imag/796.)
                                else:
                                    rea = '{0:> 14.6e}'.format(zz.real)
                                    ima = '{0:> 14.6e}'.format(zz.imag)
                            elif self.formatting == '2':
                                per = '{0:<14.6e}'.format(self.period_list[ff])
                                sta = '{0:<10}'.format(self.data_array[ss]['station'])
                                lat = '{0:> 14.6f}'.format(self.data_array[ss]['lat'])
                                lon = '{0:> 14.6f}'.format(self.data_array[ss]['lon'])
                                eas = '{0:> 12.3f}'.format(self.data_array[ss]['rel_east'])
                                nor = '{0:> 15.3f}'.format(self.data_array[ss]['rel_north'])
                                ele = '{0:> 10.3f}'.format(self.data_array[ss]['elev'])
                                com = '{0:>12}'.format(comp.upper())
                                if self.units == 'ohm':
                                    rea = '{0:> 17.6e}'.format(zz.real/796.)
                                    ima = '{0:> 17.6e}'.format(zz.imag/796.)
                                else:
                                    rea = '{0:> 17.6e}'.format(zz.real)
                                    ima = '{0:> 17.6e}'.format(zz.imag)
                            # get error from inversion error
                            abs_err = self.data_array['{0}_inv_err'.format(c_key)][ss, ff, z_ii, z_jj]
                            if np.isinf(abs_err) or np.isnan(abs_err):
                                # fall back to the order of magnitude of the
                                # larger of the real/imag parts
                                abs_err = 10**(np.floor(np.log10(abs(max([float(rea),
                                                                          float(ima)])))))
                            abs_err = '{0:> 14.6e}'.format(abs(abs_err))
                            #make sure that x==north, y==east, z==+down
                            dline = ''.join([per, sta, lat, lon, nor, eas, ele,
                                             com, rea, ima, abs_err, '\n'])
                            dlines.append(dline)
        with open(self.data_fn, 'w') as dfid:
            dfid.writelines(dlines)
        print 'Wrote ModEM data file to {0}'.format(self.data_fn)
        return self.data_fn
def convert_ws3dinv_data_file(self, ws_data_fn, station_fn=None,
                              save_path=None, fn_basename=None):
    """
    Convert a ws3dinv data file into ModEM format and write it to disk.

    Arguments:
    ------------
        **ws_data_fn** : string
                         full path to WS data file

        **station_fn** : string
                         full path to station info file output by
                         mtpy.modeling.ws3dinv. Or you can create one using
                         mtpy.modeling.ws3dinv.WSStation

        **save_path** : string
                        directory path to save data file to.
                        *default* is cwd

        **fn_basename** : string
                          basename to save data file as
                          *default* is 'ModEM_Data.dat'

    Outputs:
    -----------
        **data_fn** : string
                      full path to created data file

    :Example: ::

        >>> import mtpy.modeling.modem as modem
        >>> mdr = modem.Data()
        >>> mdr.convert_ws3dinv_data_file(r"/home/ws3dinv/inv1/WSData.dat",
        >>>       station_fn=r"/home/ws3dinv/inv1/WS_Station_Locations.txt")
    """

    if not os.path.isfile(ws_data_fn):
        raise ws.WSInputError('Did not find {0}, check path'.format(ws_data_fn))

    if save_path is not None:
        self.save_path = save_path
    else:
        self.save_path = os.path.dirname(ws_data_fn)

    if fn_basename is not None:
        self.fn_basename = fn_basename

    # --> get data from data file
    wsd = ws.WSData()
    wsd.read_data_file(ws_data_fn, station_fn=station_fn)

    ns = wsd.data['station'].shape[0]
    nf = wsd.period_list.shape[0]

    self.period_list = wsd.period_list.copy()
    self._set_dtype((nf, 2, 2), (nf, 1, 2))
    self.data_array = np.zeros(ns, dtype=self._dtype)

    # --> fill data array; a WS file carries no geographic coordinates,
    #     so lat/lon/elev are zeroed and only relative locations are kept.
    #     (The original set station/rel_east/rel_north twice; the
    #     duplicate assignments have been removed.)
    for ii, d_arr in enumerate(wsd.data):
        self.data_array[ii]['station'] = d_arr['station']
        self.data_array[ii]['rel_east'] = d_arr['east']
        self.data_array[ii]['rel_north'] = d_arr['north']
        self.data_array[ii]['z'][:] = d_arr['z_data']
        # WS stores an absolute error plus a multiplicative error map
        self.data_array[ii]['z_err'][:] = d_arr['z_data_err'].real * \
                                          d_arr['z_err_map'].real
        self.data_array[ii]['lat'] = 0.0
        self.data_array[ii]['lon'] = 0.0
        self.data_array[ii]['elev'] = 0.0

    # need to change the inversion mode to be the same as the ws_data file
    # NOTE(review): `arr.all() == 0.0` compares the "all nonzero" bool to
    # 0.0, i.e. it is True when *some* entry is zero.  Preserved as-is to
    # keep behavior identical -- confirm the intended semantics.
    if self.data_array['z'].all() == 0.0:
        if self.data_array['tip'].all() == 0.0:
            self.inv_mode = '4'
        else:
            self.inv_mode = '3'
    else:
        if self.data_array['tip'].all() == 0.0:
            self.inv_mode = '2'
        else:
            self.inv_mode = '1'

    # --> write file
    self.write_data_file()
def convert_modem_to_ws(self, data_fn=None, ws_data_fn=None,
                        error_map=[1, 1, 1, 1]):
    """
    Convert a ModEM data file to WS format, writing both a WS data file
    and a WS station file.

    Arguments
    -------------
        **data_fn** : string
                      full path to modem data file to convert; if given
                      it is read first, otherwise already-loaded data
                      are used

        **ws_data_fn** : string
                         full path to write ws format data file
                         *default* is WS_Data.dat next to data_fn

        **error_map** : [zxx, zxy, zyx, zyy] floats
                        error map that ws uses, weights for each component
                        *default* is [1, 1, 1, 1] for equal weighting
                        (not mutated here, so the mutable default is
                        kept for interface compatibility)

    Returns
    ------------
        **ws_data_fn** : string
                         full path of ws data file

        **ws_station_fn** : string
                            full path to ws station file

    Example
    -----------
        >>> import mtpy.modeling.modem as modem
        >>> md = modem.Data()
        >>> md.convert_modem_to_ws(data_fn=r"/home/mt/modem/data.dat")
    """
    # BUG FIX: the original tested `self.data_fn` here, so a given
    # data_fn argument was never actually read (and self.data_fn could
    # be re-read with data_fn=None).  Test the argument instead.
    if data_fn is not None:
        self.read_data_file(data_fn)

    if ws_data_fn is None:
        save_path = os.path.dirname(self.data_fn)
        ws_data_fn = os.path.join(save_path, 'WS_Data.dat')
    else:
        save_path = os.path.dirname(ws_data_fn)

    # write the WS station location file
    station_info = ws.WSStation()
    station_info.east = self.data_array['rel_east']
    station_info.north = self.data_array['rel_north']
    station_info.names = self.data_array['station']
    station_info.elev = self.data_array['elev']
    station_info.save_path = save_path
    station_info.write_station_file()

    # build the WS data container
    ws_data = ws.WSData()
    ws_data.period_list = self.period_list.copy()
    ws_data.z_err_map = error_map
    ws_data.z_err = 'data'
    z_shape = (self.period_list.size, 2, 2)
    # use the builtins float/complex: the np.float/np.complex aliases
    # were deprecated in NumPy 1.20 and later removed
    data_dtype = [('station', '|S10'),
                  ('east', float),
                  ('north', float),
                  ('z_data', (complex, z_shape)),
                  ('z_data_err', (complex, z_shape)),
                  ('z_err_map', (complex, z_shape))]
    ws_data.data = np.zeros(self.data_array['station'].size,
                            dtype=data_dtype)
    ws_data.data['station'][:] = self.data_array['station']
    ws_data.data['east'] = self.data_array['rel_east']
    ws_data.data['north'] = self.data_array['rel_north']
    ws_data.data['z_data'][:, :, :] = self.data_array['z']
    # ModEM errors are real; WS expects a complex array, so apply the
    # error to both real and imaginary parts
    ws_data.data['z_data_err'][:, :, :] = self.data_array['z_err']*(1+1j)
    ws_data.data['z_err_map'][:, :, :] = np.array([[1, 1], [1, 1]])

    ws_data.write_data_file(save_path=save_path, data_fn=ws_data_fn)

    return ws_data.data_fn, station_info.station_fn
def read_data_file(self, data_fn=None):
    """
    Read a ModEM data file.

    Arguments:
    ------------
        **data_fn** : string
                      full path to the data file to read; if None the
                      current self.data_fn is used.

    Fills attributes:
        * data_array
        * period_list
        * mt_dict

    Raises:
        ModEMError if no file name is set or the file does not exist.
    """
    if data_fn is not None:
        self.data_fn = data_fn
        self.save_path = os.path.dirname(self.data_fn)
        self.fn_basename = os.path.basename(self.data_fn)

    if self.data_fn is None:
        raise ModEMError('data_fn is None, enter a data file to read.')
    elif not os.path.isfile(self.data_fn):
        raise ModEMError('Could not find {0}, check path'.format(self.data_fn))

    # use open() with a context manager; the original used the
    # Python-2-only file() builtin and closed it manually
    with open(self.data_fn, 'r') as dfid:
        dlines = dfid.readlines()

    header_list = []
    metadata_list = []
    data_list = []
    period_list = []
    station_list = []
    read_impedance = False
    read_tipper = False
    inv_list = []
    for dline in dlines:
        if dline.find('#') == 0:
            header_list.append(dline.strip())
        elif dline.find('>') == 0:
            # metadata lines hold units, wave-sign convention, the
            # inversion components and the center point (lat lon)
            metadata_list.append(dline[1:].strip())
            if dline.lower().find('ohm') > 0:
                self.units = 'ohm'
            elif dline.lower().find('mv') > 0:
                self.units = ' [mV/km]/[nT]'
            elif dline.lower().find('vertical') > 0:
                read_tipper = True
                read_impedance = False
                inv_list.append('Full_Vertical_Components')
            elif dline.lower().find('impedance') > 0:
                read_impedance = True
                read_tipper = False
                inv_list.append('Full_Impedance')
            if dline.find('exp') > 0:
                # wave sign is the character following '(' in e.g. exp(+iwt)
                if read_impedance is True:
                    self.wave_sign_impedance = dline[dline.find('(') + 1]
                elif read_tipper is True:
                    self.wave_sign_tipper = dline[dline.find('(') + 1]
            elif len(dline[1:].strip().split()) == 2:
                if dline.find('.') > 0:
                    # center point given as "lat lon"
                    value_list = [float(value) for value in
                                  dline[1:].strip().split()]
                    # builtin float replaces the removed np.float alias
                    self.center_point = np.recarray(1, dtype=[('station', '|S10'),
                                                              ('lat', float),
                                                              ('lon', float),
                                                              ('elev', float),
                                                              ('rel_east', float),
                                                              ('rel_north', float),
                                                              ('east', float),
                                                              ('north', float),
                                                              ('zone', 'S4')])
                    self.center_point.lat = value_list[0]
                    self.center_point.lon = value_list[1]

                    ce, cn, cz = gis_tools.project_point_ll2utm(self.center_point.lat,
                                                                self.center_point.lon)
                    self.center_point.east = ce
                    self.center_point.north = cn
                    self.center_point.zone = cz
                else:
                    pass
        else:
            # data lines have exactly 11 columns:
            # period station lat lon north east elev comp real imag error
            dline_list = dline.strip().split()
            if len(dline_list) == 11:
                for ii, d_str in enumerate(dline_list):
                    if ii != 1:
                        try:
                            dline_list[ii] = float(d_str.strip())
                        except ValueError:
                            pass
                    # be sure the station name is a string
                    else:
                        dline_list[ii] = d_str.strip()
                period_list.append(dline_list[0])
                station_list.append(dline_list[1])
                data_list.append(dline_list)

    # try to find rotation angle in the header, encoded as e.g. "10.0_deg"
    h_list = header_list[0].split()
    for hh, h_str in enumerate(h_list):
        if h_str.find('_deg') > 0:
            try:
                self._rotation_angle = float(h_str[0:h_str.find('_deg')])
                print ('Set rotation angle to {0:.1f} '.format(
                       self._rotation_angle) + 'deg clockwise from N')
            except ValueError:
                pass

    # find inversion mode
    for inv_key in self.inv_mode_dict.keys():
        inv_mode_list = self.inv_mode_dict[inv_key]
        # BUG FIX: the original compared len(inv_mode_list) (an int)
        # against inv_list (a list); that is never equal, so every mode
        # was skipped and inv_mode was never detected.  Compare lengths.
        if len(inv_mode_list) != len(inv_list):
            continue
        else:
            # builtin bool replaces the removed np.bool alias
            tf_arr = np.zeros(len(inv_list), dtype=bool)

            for tf, data_inv in enumerate(inv_list):
                if data_inv in self.inv_mode_dict[inv_key]:
                    tf_arr[tf] = True

            # np.all replaces np.alltrue (removed in NumPy 2.0)
            if np.all(tf_arr):
                self.inv_mode = inv_key
                break

    self.period_list = np.array(sorted(set(period_list)))
    station_list = sorted(set(station_list))

    # make a period dictionary with key as period and value as index
    period_dict = dict([(per, ii) for ii, per in enumerate(self.period_list)])

    # --> need to sort the data into a useful fashion such that each
    #     station is an mt object
    data_dict = {}
    z_dummy = np.zeros((len(self.period_list), 2, 2), dtype='complex')
    t_dummy = np.zeros((len(self.period_list), 1, 2), dtype='complex')

    index_dict = {'zxx': (0, 0), 'zxy': (0, 1), 'zyx': (1, 0), 'zyy': (1, 1),
                  'tx': (0, 0), 'ty': (0, 1)}

    # dictionary for true/false if station data (lat, lon, elev, etc.)
    # has been filled already so we don't rewrite it each time
    tf_dict = {}
    for station in station_list:
        data_dict[station] = mt.MT()
        data_dict[station].Z = mtz.Z(z_array=z_dummy.copy(),
                                     z_err_array=z_dummy.copy().real,
                                     freq=1. / self.period_list)
        data_dict[station].Tipper = mtz.Tipper(tipper_array=t_dummy.copy(),
                                               tipper_err_array=t_dummy.copy().real,
                                               freq=1. / self.period_list)
        # make sure that the station data starts out with false to fill
        # the data later
        tf_dict[station] = False

    # fill in the data for each station
    for dd in data_list:
        # get the period index from the data line
        p_index = period_dict[dd[0]]
        # get the component index from the data line
        ii, jj = index_dict[dd[7].lower()]

        # if the station metadata has not been filled yet, fill it
        if not tf_dict[dd[1]]:
            data_dict[dd[1]].lat = dd[2]
            data_dict[dd[1]].lon = dd[3]
            data_dict[dd[1]].grid_north = dd[4]
            data_dict[dd[1]].grid_east = dd[5]
            data_dict[dd[1]].grid_elev = dd[6]
            data_dict[dd[1]].station = dd[1]
            tf_dict[dd[1]] = True

        # fill in the impedance tensor with appropriate values
        if dd[7].find('Z') == 0:
            z_err = dd[10]
            if self.wave_sign_impedance == '+':
                z_value = dd[8] + 1j * dd[9]
            elif self.wave_sign_impedance == '-':
                z_value = dd[8] - 1j * dd[9]

            if self.units == 'ohm':
                # convert practical ohm units back to [mV/km]/[nT]
                z_value *= 796.
                z_err *= 796.

            data_dict[dd[1]].Z.z[p_index, ii, jj] = z_value
            data_dict[dd[1]].Z.z_err[p_index, ii, jj] = z_err
        # fill in tipper with appropriate values
        elif dd[7].find('T') == 0:
            if self.wave_sign_tipper == '+':
                data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8] + 1j * dd[9]
            elif self.wave_sign_tipper == '-':
                data_dict[dd[1]].Tipper.tipper[p_index, ii, jj] = dd[8] - 1j * dd[9]
            data_dict[dd[1]].Tipper.tipper_err[p_index, ii, jj] = dd[10]

    # make mt_dict an attribute for easier manipulation later
    self.mt_dict = data_dict

    ns = len(self.mt_dict.keys())
    nf = len(self.period_list)
    self._set_dtype((nf, 2, 2), (nf, 1, 2))
    self.data_array = np.zeros(ns, dtype=self._dtype)

    # be sure to calculate invariants and phase tensor for each station
    for ii, s_key in enumerate(sorted(self.mt_dict.keys())):
        mt_obj = self.mt_dict[s_key]

        self.mt_dict[s_key].pt.set_z_object(mt_obj.Z)
        self.mt_dict[s_key].Tipper.compute_amp_phase()
        self.mt_dict[s_key].Tipper.compute_mag_direction()

        self.data_array[ii]['station'] = mt_obj.station
        self.data_array[ii]['lat'] = mt_obj.lat
        self.data_array[ii]['lon'] = mt_obj.lon
        self.data_array[ii]['east'] = mt_obj.east
        self.data_array[ii]['north'] = mt_obj.north
        self.data_array[ii]['elev'] = mt_obj.grid_elev
        self.data_array[ii]['rel_east'] = mt_obj.grid_east
        self.data_array[ii]['rel_north'] = mt_obj.grid_north

        self.data_array[ii]['z'][:] = mt_obj.Z.z
        self.data_array[ii]['z_err'][:] = mt_obj.Z.z_err
        self.data_array[ii]['z_inv_err'][:] = mt_obj.Z.z_err

        self.data_array[ii]['tip'][:] = mt_obj.Tipper.tipper
        self.data_array[ii]['tip_err'][:] = mt_obj.Tipper.tipper_err
        self.data_array[ii]['tip_inv_err'][:] = mt_obj.Tipper.tipper_err
def write_vtk_station_file(self, vtk_save_path=None,
vtk_fn_basename='ModEM_stations'):
"""
write a vtk file for station locations. For now this in relative
coordinates.
Arguments:
-------------
**vtk_save_path** : string
directory to save vtk file to.
*default* is Model.save_path
**vtk_fn_basename** : string
filename basename of vtk file
*default* is ModEM_stations, evtk will add
on the extension .vtu
"""
if vtk_save_path is None:
vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
else:
vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)
pointsToVTK(vtk_fn,
self.station_locations.rel_north/1000.,
self.station_locations.rel_east/1000.,
self.station_locations.elev/1000.,
data={'elevation':self.station_locations.elev})
print '--> Wrote station file to {0}'.format(vtk_fn)
print '-'*50
def get_parameters(self):
    """
    Collect the key inversion parameters into a dictionary for
    documentation purposes.

    Returns a dict keyed 'data.<parameter>' covering the error settings,
    wave-sign conventions, rotation angle, save path, period range,
    inversion mode, station count and model center point.
    """
    attr_names = ('error_type_z',
                  'error_value_z',
                  'error_type_tipper',
                  'error_value_tipper',
                  'wave_sign_impedance',
                  'wave_sign_tipper',
                  'rotation_angle',
                  'save_path')

    # plain attributes, namespaced under 'data.'
    parameter_dict = dict(('data.{0}'.format(name), getattr(self, name))
                          for name in attr_names)

    # derived quantities
    parameter_dict['data.period_min'] = self.period_list.min()
    parameter_dict['data.period_max'] = self.period_list.max()
    parameter_dict['data.period_num'] = self.period_list.size
    parameter_dict['data.inv_mode'] = self.inv_mode_dict[self.inv_mode]
    parameter_dict['data.num_stations'] = self.station_locations.station.size
    parameter_dict['data.center_point_ll'] = (self.center_point.lat[0],
                                              self.center_point.lon[0])
    parameter_dict['data.center_point_utm'] = (self.center_point.north[0],
                                               self.center_point.east[0],
                                               self.center_point.zone[0])
    return parameter_dict
def center_stations(self, model_fn, data_fn=None):
    """
    Shift each station location to the center of the model cell it falls
    in; can be useful when topography is included.

    Arguments
    -----------
        **model_fn** : string
                       full path to model file

        **data_fn** : string
                      full path to data file; if given it is read first,
                      otherwise the already-loaded data are used
    """
    if data_fn is not None:
        self.read_data_file(data_fn)

    model_obj = Model()
    model_obj.read_model_file(model_fn)

    for station in self.station_locations.station_locations:
        # index of the grid node just west/south of the station
        col = np.where(model_obj.grid_east >= station['rel_east'])[0][0] - 1
        row = np.where(model_obj.grid_north >= station['rel_north'])[0][0] - 1

        # midpoint of the enclosing cell in each direction
        cell_mid_east = model_obj.grid_east[col:col + 2].mean()
        cell_mid_north = model_obj.grid_north[row:row + 2].mean()

        # write the centered location back into the data array
        didx = np.where(self.data_array['station'] == station['station'])[0][0]
        self.data_array[didx]['rel_east'] = cell_mid_east
        self.data_array[didx]['rel_north'] = cell_mid_north
def change_data_elevation(self, model_fn, data_fn=None,
                          res_air=1e12):
    """
    Rewrite each station's elevation so the station sits on the model
    surface rather than floating in the air layers.

    Arguments:
    ------------------
        *model_fn* : string
                     full path to ModEM model file that has elevation
                     incorporated.

        *data_fn* : string
                    full path to a ModEM data file; if given it is read
                    first, otherwise the already-loaded data are used.

        *res_air* : float
                    resistivity of air. Default is 1E12 Ohm-m.
    """
    if data_fn is not None:
        self.read_data_file(data_fn)

    model_obj = Model()
    model_obj.read_model_file(model_fn)

    locations = self.station_locations.station_locations.copy()
    for sta in locations:
        # subtract one to land on the cell just west/south of the station
        col = np.where(model_obj.grid_east >= sta['rel_east'])[0][0] - 1
        row = np.where(model_obj.grid_north >= sta['rel_north'])[0][0] - 1

        # first layer in that column whose resistivity is below air
        layer = np.where(model_obj.res_model[row, col, :] < res_air * .9)[0][0]

        didx = np.where(self.data_array['station'] == sta['station'])[0][0]
        self.data_array[didx]['elev'] = model_obj.grid_z[layer]
def project_stations_on_topography(self, model_object, air_resistivity=1e12):
"""
This method is used in add_topography().
It will Re-write the data file to change the elevation column.
And update covariance mask according topo elevation model.
:param air_resistivity:
:return:
"""
sx = self.station_locations.station_locations['rel_east']
sy = self.station_locations.station_locations['rel_north']
# find index of each station on grid
station_index_x = []
station_index_y = []
for sname in self.station_locations.station_locations['station']:
ss = np.where(self.station_locations.station_locations['station'] == sname)[0][0]
# relative locations of stations
sx, sy = self.station_locations.station_locations['rel_east'][ss], \
self.station_locations.station_locations['rel_north'][ss]
# indices of stations on model grid
sxi = np.where((sx <= model_object.grid_east[1:]) & (
sx > model_object.grid_east[:-1]))[0][0]
syi = np.where((sy <= model_object.grid_north[1:]) & (
sy > model_object.grid_north[:-1]))[0][0]
# first, check if there are any air cells
if np.any(model_object.res_model[syi, sxi] > 0.95 * air_resistivity):
szi = np.amin(
np.where((model_object.res_model[syi, sxi] < 0.95 * air_resistivity))[0])
# otherwise place station at the top of the model
else:
szi = 0
# get relevant grid point elevation
topoval = model_object.grid_z[szi]
station_index_x.append(sxi)
station_index_y.append(syi)
# update elevation in station locations and data array, +1 m as
# data elevation needs to be below the topography (as advised by Naser)
self.station_locations.station_locations['elev'][ss] = topoval + 0.1
self.data_array['elev'][ss] = topoval + 0.1
print self.data_array['elev'][ss]
# logger.debug("Re-write data file after adding topo")
self.write_data_file(fill=False,elevation=True) # (Xi, Yi, Zi) of each station-i may be shifted
# debug self.Data.write_data_file(save_path='/e/tmp', fill=False)
return (station_index_x, station_index_y)
#==============================================================================
# mesh class
#==============================================================================
class Model(object):
"""
make and read a FE mesh grid
The mesh assumes the coordinate system where:
x == North
y == East
z == + down
All dimensions are in meters.
The mesh is created by first making a regular grid around the station area,
then padding cells are added that exponentially increase to the given
extensions. Depth cell increase on a log10 scale to the desired depth,
then padding cells are added that increase exponentially.
Arguments
-------------
**station_object** : mtpy.modeling.modem.Stations object
.. seealso:: mtpy.modeling.modem.Stations
Examples
-------------
:Example 1 --> create mesh first then data file: ::
>>> import mtpy.modeling.modem as modem
>>> import os
>>> # 1) make a list of all .edi files that will be inverted for
>>> edi_path = r"/home/EDI_Files"
>>> edi_list = [os.path.join(edi_path, edi)
for edi in os.listdir(edi_path)
>>> ... if edi.find('.edi') > 0]
>>> # 2) Make a Stations object
>>> stations_obj = modem.Stations()
>>> stations_obj.get_station_locations_from_edi(edi_list)
>>> # 3) make a grid from the stations themselves with 200m cell spacing
>>> mmesh = modem.Model(station_obj)
>>> # change cell sizes
>>> mmesh.cell_size_east = 200,
>>> mmesh.cell_size_north = 200
>>> mmesh.ns_ext = 300000 # north-south extension
>>> mmesh.ew_ext = 200000 # east-west extension of model
>>> mmesh.make_mesh()
>>> # check to see if the mesh is what you think it should be
>>> msmesh.plot_mesh()
>>> # all is good write the mesh file
>>> msmesh.write_model_file(save_path=r"/home/modem/Inv1")
>>> # create data file
>>> md = modem.Data(edi_list, station_locations=mmesh.station_locations)
>>> md.write_data_file(save_path=r"/home/modem/Inv1")
:Example 2 --> Rotate Mesh: ::
>>> mmesh.mesh_rotation_angle = 60
>>> mmesh.make_mesh()
.. note:: ModEM assumes all coordinates are relative to North and East, and
does not accommodate mesh rotations, therefore, here the rotation
is of the stations, which essentially does the same thing. You
will need to rotate you data to align with the 'new' coordinate
system.
==================== ======================================================
Attributes Description
==================== ======================================================
cell_size_east mesh block width in east direction
*default* is 500
cell_size_north mesh block width in north direction
*default* is 500
edi_list list of .edi files to invert for
grid_east overall distance of grid nodes in east direction
grid_north overall distance of grid nodes in north direction
grid_z overall distance of grid nodes in z direction
model_fn full path to initial file name
n_layers total number of vertical layers in model
nodes_east relative distance between nodes in east direction
nodes_north relative distance between nodes in north direction
nodes_z relative distance between nodes in east direction
pad_east number of cells for padding on E and W sides
*default* is 7
pad_north number of cells for padding on S and N sides
*default* is 7
pad_num number of cells with cell_size with outside of
station area. *default* is 3
pad_method method to use to create padding:
extent1, extent2 - calculate based on ew_ext and
ns_ext
stretch - calculate based on pad_stretch factors
pad_stretch_h multiplicative number for padding in horizontal
direction.
pad_stretch_v padding cells N & S will be pad_root_north**(x)
pad_z number of cells for padding at bottom
*default* is 4
ew_ext E-W extension of model in meters
ns_ext N-S extension of model in meters
res_list list of resistivity values for starting model
res_model starting resistivity model
mesh_rotation_angle Angle to rotate the grid to. Angle is measured
positve clockwise assuming North is 0 and east is 90.
*default* is None
save_path path to save file to
station_fn full path to station file
station_locations location of stations
title title in initial file
z1_layer first layer thickness
z_bottom absolute bottom of the model *default* is 300,000
z_target_depth Depth of deepest target, *default* is 50,000
==================== ======================================================
==================== ======================================================
Methods Description
==================== ======================================================
make_mesh makes a mesh from the given specifications
plot_mesh plots mesh to make sure everything is good
write_initial_file writes an initial model file that includes the mesh
==================== ======================================================
"""
def __init__(self, station_object=None, **kwargs):
    """
    Initialize the model-mesh parameters with sensible defaults.

    :param station_object: mtpy.modeling.modem.Stations object holding
                           the station locations the mesh is built around
    :param kwargs: any attribute name/value pair; applied with setattr
                   so callers can override any default below
    """
    self.station_locations = station_object

    # size of cells within station area in meters
    self.cell_size_east = 500
    self.cell_size_north = 500

    # padding cells on either side
    self.pad_east = 7
    self.pad_north = 7
    self.pad_z = 4
    self.pad_num = 3

    # total model extents in meters
    self.ew_ext = 100000
    self.ns_ext = 100000

    # stretch factor (root) of padding cells
    self.pad_stretch_h = 1.2
    self.pad_stretch_v = 1.2

    # method to use to create padding: 'extent1', 'extent2' or 'stretch'
    self.pad_method = 'extent1'

    # vertical discretization
    self.z1_layer = 10
    self.z_target_depth = 50000
    self.z_bottom = 300000

    # number of vertical layers
    self.n_layers = 30

    # strike angle to rotate grid to
    self.mesh_rotation_angle = 0

    # --> attributes to be calculated
    # grid nodes (cell widths)
    self._nodes_east = None
    self._nodes_north = None
    self._nodes_z = None

    # grid locations (node positions)
    self.grid_east = None
    self.grid_north = None
    self.grid_z = None

    # resistivity model
    self.res_starting_value = 100.0
    self.res_model = None

    # initial file stuff
    self.model_fn = None
    self.save_path = os.getcwd()
    self.model_fn_basename = 'ModEM_Model_File.rho'
    # NOTE: the original checked "if self.model_fn is not None" right
    # after assigning model_fn = None above; that branch could never
    # execute and has been removed as dead code.

    self.title = 'Model File written by MTpy.modeling.modem'
    self.res_scale = 'loge'

    # let any keyword argument override the defaults above
    for key, value in kwargs.items():
        setattr(self, key, value)
### --> make nodes and grid symbiotic so if you set one the other one
### gets set as well
## Nodes East
@property
def nodes_east(self):
    """East-west cell widths, recomputed from grid_east when available."""
    if self.grid_east is not None:
        self._nodes_east = np.array(
            [abs(self.grid_east[kk + 1] - self.grid_east[kk])
             for kk in range(self.grid_east.size - 1)])
    return self._nodes_east

@nodes_east.setter
def nodes_east(self, nodes):
    """Set cell widths and rebuild grid_east, centered on zero."""
    nodes = np.array(nodes)
    self._nodes_east = nodes
    # node positions run from -total/2 to +total/2, cumulative widths
    self.grid_east = np.array(
        [-nodes.sum() / 2 + nodes[0:kk].sum() for kk in range(nodes.size)] +
        [nodes.sum() / 2])
## Nodes North
@property
def nodes_north(self):
    """North-south cell widths, recomputed from grid_north when available."""
    if self.grid_north is not None:
        self._nodes_north = np.array(
            [abs(self.grid_north[kk + 1] - self.grid_north[kk])
             for kk in range(self.grid_north.size - 1)])
    return self._nodes_north

@nodes_north.setter
def nodes_north(self, nodes):
    """Set cell widths and rebuild grid_north, centered on zero."""
    nodes = np.array(nodes)
    self._nodes_north = nodes
    # node positions run from -total/2 to +total/2, cumulative widths
    self.grid_north = np.array(
        [-nodes.sum() / 2 + nodes[0:kk].sum() for kk in range(nodes.size)] +
        [nodes.sum() / 2])
@property
def nodes_z(self):
    """Vertical layer thicknesses, recomputed from grid_z when available."""
    if self.grid_z is not None:
        self._nodes_z = np.array(
            [abs(self.grid_z[kk + 1] - self.grid_z[kk])
             for kk in range(self.grid_z.size - 1)])
    return self._nodes_z

@nodes_z.setter
def nodes_z(self, nodes):
    """Set layer thicknesses and rebuild grid_z starting at zero (+down)."""
    nodes = np.array(nodes)
    self._nodes_z = nodes
    # depth nodes are the cumulative thickness, starting at the surface
    self.grid_z = np.array(
        [nodes[0:kk].sum() for kk in range(nodes.size)] + [nodes.sum()])
def make_mesh(self):
    """
    create finite element mesh according to parameters set.

    The mesh is built by:
        1. Making a regular grid within the station area.
        2. Adding pad_num of cell_width cells outside of station area
        3. Adding padding cells to given extension and number of padding
           cells.
        4. Making vertical cells starting with z1_layer increasing
           logarithmically (base 10) to z_target_depth and num_layers.
        5. Add vertical padding cells to desired extension.
        6. Check to make sure none of the stations lie on a node.
           If they do then move the node by .02*cell_width
    """
    ## --> find the edges of the grid
    ## calculate the extra width of padding cells
    ## multiply by 1.5 because this is only for 1 side
    pad_width_east = self.pad_num*1.5*self.cell_size_east
    pad_width_north = self.pad_num*1.5*self.cell_size_north

    ## get the extremities of the station area, extended by the pad width
    west = self.station_locations.rel_east.min()-pad_width_east
    east = self.station_locations.rel_east.max()+pad_width_east
    south = self.station_locations.rel_north.min()-pad_width_north
    north = self.station_locations.rel_north.max()+pad_width_north

    # round the numbers so they are easier to read (nearest 100 m)
    west = np.round(west, -2)
    east = np.round(east, -2)
    south = np.round(south, -2)
    north = np.round(north, -2)

    #-------make a grid around the stations from the parameters above------
    # adjust the edges so we have a whole number of cells
    add_ew = ((east - west)%self.cell_size_east)/2.
    add_ns = ((north - south)%self.cell_size_north)/2.

    #--> make the inner grid first
    # NOTE(review): the east range starts one cell *below* west+add_ew while
    # the north range starts one cell *above* south+add_ns -- this looks
    # asymmetric; confirm it is intentional.
    inner_east = np.arange(west + add_ew - self.cell_size_east,
                           east - add_ew + 2*self.cell_size_east,
                           self.cell_size_east)
    inner_north = np.arange(south + add_ns + self.cell_size_north,
                            north - add_ns + 2*self.cell_size_north,
                            self.cell_size_north)

    ## compute padding cells according to the configured method
    if self.pad_method == 'extent1':
        # pad out to ew_ext/ns_ext based on the remaining distance
        padding_east = mtmesh.get_padding_cells(self.cell_size_east,
                                                self.ew_ext/2-east,
                                                self.pad_east,
                                                self.pad_stretch_h)
        padding_north = mtmesh.get_padding_cells(self.cell_size_north,
                                                 self.ns_ext/2-north,
                                                 self.pad_north,
                                                 self.pad_stretch_h)
    elif self.pad_method == 'extent2':
        padding_east = mtmesh.get_padding_cells2(self.cell_size_east,
                                                 inner_east[-1],
                                                 self.ew_ext/2.,
                                                 self.pad_east)
        padding_north = mtmesh.get_padding_cells2(self.cell_size_north,
                                                  inner_north[-1],
                                                  self.ns_ext/2.,
                                                  self.pad_north)
    elif self.pad_method == 'stretch':
        # geometric stretch of the cell size; ignores ew_ext/ns_ext
        padding_east = mtmesh.get_padding_from_stretch(self.cell_size_east,
                                                       self.pad_stretch_h,
                                                       self.pad_east)
        padding_north = mtmesh.get_padding_from_stretch(self.cell_size_north,
                                                        self.pad_stretch_h,
                                                        self.pad_north)

    # make the horizontal grid: mirrored padding + inner grid + padding
    self.grid_east = np.append(np.append(-1*padding_east[::-1]+inner_east.min(),
                                         inner_east),
                               padding_east+inner_east.max())
    self.grid_north = np.append(np.append(-1*padding_north[::-1]+inner_north.min(),
                                          inner_north),
                                padding_north+inner_north.max())

    #--> need to make sure none of the stations lie on the nodes;
    #    nudge any colliding node away by 2% of the cell size
    for s_east in sorted(self.station_locations.rel_east):
        try:
            node_index = np.where(abs(s_east-self.grid_east) <
                                  .02*self.cell_size_east)[0][0]
            if s_east-self.grid_east[node_index] > 0:
                self.grid_east[node_index] -= .02*self.cell_size_east
            elif s_east-self.grid_east[node_index] < 0:
                self.grid_east[node_index] += .02*self.cell_size_east
        except IndexError:
            # no node close to this station -- nothing to move
            continue

    #--> need to make sure none of the stations lie on the nodes
    for s_north in sorted(self.station_locations.rel_north):
        try:
            node_index = np.where(abs(s_north-self.grid_north) <
                                  .02*self.cell_size_north)[0][0]
            if s_north-self.grid_north[node_index] > 0:
                self.grid_north[node_index] -= .02*self.cell_size_north
            elif s_north-self.grid_north[node_index] < 0:
                self.grid_north[node_index] += .02*self.cell_size_north
        except IndexError:
            continue

    #--> make depth grid: log10-spaced layers from z1_layer down toward
    #    z_target_depth, leaving pad_z layers for the padding below
    log_z = np.logspace(np.log10(self.z1_layer),
                        np.log10(self.z_target_depth-np.logspace(np.log10(self.z1_layer),
                                                                 np.log10(self.z_target_depth),
                                                                 num=self.n_layers)[-2]),
                        num=self.n_layers-self.pad_z)

    # round each thickness to 2 significant figures for readability
    z_nodes = np.array([np.round(zz, -int(np.floor(np.log10(zz))-1)) for zz in
                        log_z])

    #padding cells in the vertical direction down to z_bottom
    z_padding = mtmesh.get_padding_cells(z_nodes[-1],
                                         self.z_bottom-z_nodes.sum(),
                                         self.pad_z,
                                         self.pad_stretch_v)
    # make the blocks into nodes as oppose to total width
    z_padding = np.array([z_padding[ii+1]-z_padding[ii]
                          for ii in range(z_padding.size-1)])

    # assigning nodes_z also rebuilds grid_z via the property setter
    self.nodes_z = np.append(z_nodes, z_padding)

    #compute grid center
    center_east = np.round(self.grid_east.min()-self.grid_east.mean(), -1)
    center_north = np.round(self.grid_north.min()-self.grid_north.mean(), -1)
    center_z = 0

    # this is the value to the lower left corner from the center.
    self.grid_center = np.array([center_north, center_east, center_z])

    #--> print out useful information
    self.get_mesh_params()
def make_mesh_from_center(self, update_data_center=True):
    """
    Build the model mesh starting from the center of the station area.

    Cells of width cell_size_east/cell_size_north cover the station
    area plus pad_num extra regular-size cells; pad_east/pad_north
    stretched padding cells (growth factor pad_stretch_h, rounded to
    2 significant figures) are then appended on each side to reduce
    edge effects.  Vertical nodes come from make_z_mesh_new() and
    increase in size with depth.

    Note: air layers should NOT be added here; call add_topography()
    to put air layers on top of the model constructed here.

    :param update_data_center: if True, reproject the (shifted) grid
        center back to lat/lon (best effort) and rewrite the data file
        without topography or air layers.
    """
    # nc_extra_* = number of regular-size cells outside the station
    # area; pad_* = number of stretched padding cells beyond those
    nc_extra_east, pad_east = self.pad_num, self.pad_east
    nc_extra_north, pad_north = self.pad_num, self.pad_north

    # --> east-west extent of the inner (regular) grid
    if self.cell_number_ew is None:
        west = self.station_locations.rel_east.min() - self.cell_size_east * nc_extra_east
        east = self.station_locations.rel_east.max() + self.cell_size_east * nc_extra_east
    else:
        self._logger.debug("user specified cell number in east-west mesh %s" %
                           self.cell_number_ew)
        center_ew = 0.5 * (self.station_locations.rel_east.min() +
                           self.station_locations.rel_east.max())
        cell_num = int(self.cell_number_ew / 2)
        west = center_ew - self.cell_size_east * cell_num
        east = center_ew + self.cell_size_east * cell_num

    # --> north-south extent of the inner (regular) grid
    if self.cell_number_ns is None:
        south = self.station_locations.rel_north.min() - \
                self.cell_size_north * nc_extra_north
        north = self.station_locations.rel_north.max() + \
                self.cell_size_north * nc_extra_north
    else:
        self._logger.debug(
            "user specified cell number in north-south mesh %s" %
            self.cell_number_ns)
        center_ns = self.station_locations.rel_north.min() + \
                    self.station_locations.rel_north.max()
        center_ns = 0.5 * center_ns
        cell_num = int(self.cell_number_ns / 2)
        south = center_ns - self.cell_size_north * cell_num
        north = center_ns + self.cell_size_north * cell_num

    # round the extents to the nearest 100 m; the centre may be moved
    # slightly by this rounding
    west_r = np.round(west, -2)
    east_r = np.round(east, -2)
    south_r = np.round(south, -2)
    north_r = np.round(north, -2)

    # -------make a grid around the stations from the parameters above-----
    # --> make grid in east-west direction: cells within station area
    east_grid_r = np.arange(start=west_r, stop=east_r + self.cell_size_east,
                            step=self.cell_size_east)
    self._logger.debug("FZ: east_gridr = %s" % east_grid_r)
    mean_egrid = np.mean(east_grid_r)
    self._logger.info("mean_egrid = %s" % mean_egrid)
    if self.data_obj.rotation_angle == 0:
        self.data_obj.center_point['east'] -= mean_egrid
        self.station_locations.station_locations['rel_east'] += mean_egrid
    east_grid_r -= mean_egrid
    self._logger.debug("FZ: east_gridr_2 shifted centre = %s" % east_grid_r)

    # padding cells in the east-west direction
    for ii in range(1, pad_east + 1):
        east_0 = float(east_grid_r[-1])
        west_0 = float(east_grid_r[0])
        # pad width grows geometrically; round to 2 significant figures
        add_size = mtcc.roundsf(self.cell_size_east * self.pad_stretch_h ** ii, 2)
        pad_w = west_0 - add_size
        pad_e = east_0 + add_size
        east_grid_r = np.insert(east_grid_r, 0, pad_w)
        east_grid_r = np.append(east_grid_r, pad_e)

    # --> For some inversion codes, need to make sure none of the
    # stations lie on the nodes; this would make cell sizes unequal.
    # shift_station = 0 disables the adjustment (originally 0.02).
    shift_station = 0.0
    for s_east in sorted(self.station_locations.rel_east):
        try:
            node_index = np.where(abs(s_east - east_grid_r) <
                                  shift_station * self.cell_size_east)[0][0]
            if s_east - east_grid_r[node_index] > 0:
                east_grid_r[node_index] -= shift_station * self.cell_size_east
            elif s_east - east_grid_r[node_index] < 0:
                east_grid_r[node_index] += shift_station * self.cell_size_east
        except IndexError:
            continue

    # --> make grid in north-south direction: cells within station area
    north_grid_r = np.arange(start=south_r, stop=north_r + self.cell_size_north,
                             step=self.cell_size_north)
    if self.data_obj.rotation_angle == 0:
        self.data_obj.center_point['north'] -= np.mean(north_grid_r)
        self.station_locations.station_locations['rel_north'] += np.mean(north_grid_r)
    north_grid_r -= np.mean(north_grid_r)

    # padding cells in the north-south direction
    # (comment fixed: this loop pads N-S, not E-W)
    for ii in range(1, pad_north + 1):
        south_0 = float(north_grid_r[0])
        north_0 = float(north_grid_r[-1])
        add_size = mtcc.roundsf(self.cell_size_north * self.pad_stretch_h ** ii, 2)
        pad_s = south_0 - add_size
        pad_n = north_0 + add_size
        north_grid_r = np.insert(north_grid_r, 0, pad_s)
        north_grid_r = np.append(north_grid_r, pad_n)

    # --> need to make sure none of the stations lie on the nodes
    for s_north in sorted(self.station_locations.rel_north):
        try:
            node_index = np.where(abs(s_north - north_grid_r) <
                                  shift_station * self.cell_size_north)[0][0]
            if s_north - north_grid_r[node_index] > 0:
                north_grid_r[node_index] -= shift_station * self.cell_size_north
            elif s_north - north_grid_r[node_index] < 0:
                north_grid_r[node_index] += shift_station * self.cell_size_north
        except IndexError:
            continue

    (z_nodes, z_grid) = self.make_z_mesh_new()

    # Need to make an array of the individual cell dimensions for modem
    east_nodes = east_grid_r[1:] - east_grid_r[:-1]
    north_nodes = north_grid_r[1:] - north_grid_r[:-1]

    # compute grid center: offset of the lower-left corner from center
    center_east = -east_nodes.__abs__().sum() / 2
    center_north = -north_nodes.__abs__().sum() / 2
    center_z = 0
    self.grid_center = np.array([center_north, center_east, center_z])

    # make nodes/cells attributes
    self.nodes_east = east_nodes
    self.nodes_north = north_nodes
    self.nodes_z = z_nodes

    # make grid lines
    self.grid_east = east_grid_r
    self.grid_north = north_grid_r
    self.grid_z = z_grid

    # if desired, update the data center position (need to first project
    # east/north back to lat/lon) and rewrite to file
    if update_data_center:
        try:
            self.data_obj.center_position = self.data_obj.project_xy(self.data_obj.center_position_EN[0],
                                                                     self.data_obj.center_position_EN[1])
        except Exception:
            # best-effort reprojection: keep going if it fails, but no
            # longer a bare except (which also caught KeyboardInterrupt)
            pass
        self._logger.info("writing a data file, without topo, nor air layers.")
        self.data_obj.write_data_file(fill=False)
    self.print_mesh_params()
def get_mesh_params(self):
    """
    Print a summary of the mesh: station count, grid dimensions,
    extents in meters, and the station rotation angle.

    Parenthesized single-argument print works identically as a
    Python 2 print statement and a Python 3 function call, replacing
    the Python-2-only bare print statements.
    """
    print('-'*15)
    print(' Number of stations = {0}'.format(len(self.station_locations.station)))
    print(' Dimensions: ')
    print(' e-w = {0}'.format(self.grid_east.size))
    print(' n-s = {0}'.format(self.grid_north.size))
    print(' z = {0} (without 7 air layers)'.format(self.grid_z.size))
    print(' Extensions: ')
    print(' e-w = {0:.1f} (m)'.format(self.nodes_east.__abs__().sum()))
    print(' n-s = {0:.1f} (m)'.format(self.nodes_north.__abs__().sum()))
    print(' 0-z = {0:.1f} (m)'.format(self.nodes_z.__abs__().sum()))
    print(' Stations rotated by: {0:.1f} deg clockwise positive from N'.format(self.mesh_rotation_angle))
    print('')
    print(' ** Note ModEM does not accommodate mesh rotations, it assumes')
    print(' all coordinates are aligned to geographic N, E')
    print(' therefore rotating the stations will have a similar effect')
    print(' as rotating the mesh.')
    print('-'*15)
def plot_mesh(self, east_limits=None, north_limits=None, z_limits=None,
              **kwargs):
    """
    Plot the mesh in map view (left panel) and depth section (right
    panel) with station locations overlaid.

    Arguments:
    ----------
        **east_limits** : tuple (xmin, xmax)
                          plot min and max distances in meters for the
                          E-W direction. If None, uses the station
                          extent +/- 10 cells. *default* is None

        **north_limits** : tuple (ymin, ymax)
                           plot min and max distances in meters for the
                           N-S direction. If None, uses the station
                           extent +/- 10 cells. *default* is None

        **z_limits** : tuple (zmin, zmax)
                       plot min and max distances in meters for the
                       vertical direction, positive down. If None,
                       plots -200 m to z_target_depth.
                       *default* is None

    Optional keywords: fig_size, fig_dpi, fig_num, station_marker,
    station_color, marker_size, line_color, line_width.
    """
    fig_size = kwargs.pop('fig_size', [6, 6])
    fig_dpi = kwargs.pop('fig_dpi', 300)
    fig_num = kwargs.pop('fig_num', 1)

    station_marker = kwargs.pop('station_marker', 'v')
    marker_color = kwargs.pop('station_color', 'b')
    marker_size = kwargs.pop('marker_size', 2)

    line_color = kwargs.pop('line_color', 'k')
    line_width = kwargs.pop('line_width', .5)

    plt.rcParams['figure.subplot.hspace'] = .3
    plt.rcParams['figure.subplot.wspace'] = .3
    plt.rcParams['figure.subplot.left'] = .12
    plt.rcParams['font.size'] = 7

    fig = plt.figure(fig_num, figsize=fig_size, dpi=fig_dpi)
    plt.clf()

    # make a rotation matrix to rotate data
    #cos_ang = np.cos(np.deg2rad(self.mesh_rotation_angle))
    #sin_ang = np.sin(np.deg2rad(self.mesh_rotation_angle))

    # turns out ModEM has not accommodated rotation of the grid, so for
    # now we will not rotate anything.
    cos_ang = 1
    sin_ang = 0

    # --->plot map view
    ax1 = fig.add_subplot(1, 2, 1, aspect='equal')

    # plot station locations
    plot_east = self.station_locations.rel_east
    plot_north = self.station_locations.rel_north

    ax1.scatter(plot_east,
                plot_north,
                marker=station_marker,
                c=marker_color,
                s=marker_size)

    # vertical (east) grid lines in map view
    east_line_xlist = []
    east_line_ylist = []
    north_min = self.grid_north.min()
    north_max = self.grid_north.max()
    for xx in self.grid_east:
        east_line_xlist.extend([xx*cos_ang+north_min*sin_ang,
                                xx*cos_ang+north_max*sin_ang])
        east_line_xlist.append(None)
        east_line_ylist.extend([-xx*sin_ang+north_min*cos_ang,
                                -xx*sin_ang+north_max*cos_ang])
        east_line_ylist.append(None)
    ax1.plot(east_line_xlist,
             east_line_ylist,
             lw=line_width,
             color=line_color)

    # horizontal (north) grid lines in map view
    north_line_xlist = []
    north_line_ylist = []
    east_max = self.grid_east.max()
    east_min = self.grid_east.min()
    for yy in self.grid_north:
        north_line_xlist.extend([east_min*cos_ang+yy*sin_ang,
                                 east_max*cos_ang+yy*sin_ang])
        north_line_xlist.append(None)
        north_line_ylist.extend([-east_min*sin_ang+yy*cos_ang,
                                 -east_max*sin_ang+yy*cos_ang])
        north_line_ylist.append(None)
    ax1.plot(north_line_xlist,
             north_line_ylist,
             lw=line_width,
             color=line_color)

    if east_limits is None:
        ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
                     plot_east.max()+10*self.cell_size_east)
    else:
        ax1.set_xlim(east_limits)

    if north_limits is None:
        # bug fix: the upper limit previously used cell_size_east
        ax1.set_ylim(plot_north.min()-10*self.cell_size_north,
                     plot_north.max()+10*self.cell_size_north)
    else:
        ax1.set_ylim(north_limits)

    ax1.set_ylabel('Northing (m)', fontdict={'size':9,'weight':'bold'})
    ax1.set_xlabel('Easting (m)', fontdict={'size':9,'weight':'bold'})

    ##----plot depth view
    ax2 = fig.add_subplot(1, 2, 2, aspect='auto', sharex=ax1)

    # plot the grid
    east_line_xlist = []
    east_line_ylist = []
    for xx in self.grid_east:
        east_line_xlist.extend([xx, xx])
        east_line_xlist.append(None)
        east_line_ylist.extend([0,
                                self.grid_z.max()])
        east_line_ylist.append(None)
    ax2.plot(east_line_xlist,
             east_line_ylist,
             lw=line_width,
             color=line_color)

    z_line_xlist = []
    z_line_ylist = []
    for zz in self.grid_z:
        z_line_xlist.extend([self.grid_east.min(),
                             self.grid_east.max()])
        z_line_xlist.append(None)
        z_line_ylist.extend([zz, zz])
        z_line_ylist.append(None)
    ax2.plot(z_line_xlist,
             z_line_ylist,
             lw=line_width,
             color=line_color)

    # --> plot stations at the surface (z = 0)
    ax2.scatter(plot_east,
                [0]*self.station_locations.station.size,
                marker=station_marker,
                c=marker_color,
                s=marker_size)

    if z_limits is None:
        ax2.set_ylim(self.z_target_depth, -200)
    else:
        ax2.set_ylim(z_limits)

    if east_limits is None:
        ax1.set_xlim(plot_east.min()-10*self.cell_size_east,
                     plot_east.max()+10*self.cell_size_east)
    else:
        ax1.set_xlim(east_limits)

    ax2.set_ylabel('Depth (m)', fontdict={'size':9, 'weight':'bold'})
    ax2.set_xlabel('Easting (m)', fontdict={'size':9, 'weight':'bold'})

    plt.show()
def write_model_file(self, **kwargs):
    """
    Write a model (initial) file for ModEM.

    Note that x is assumed to be S --> N, y is assumed to be W --> E
    and z is positive downwards. This means that index [0, 0, 0] is the
    southwest corner of the first layer, so a model built by hand will
    look as it should in map view.  nodes_north/nodes_east/nodes_z are
    relative cell widths, not grid positions.

    Key Word Arguments (all set as attributes on self):
        **nodes_north** : np.array(nx) N-S block widths (m);
                          index=0 is the southernmost.
        **nodes_east** : np.array(ny) E-W block widths (m);
                         index=0 is the westernmost.
        **nodes_z** : np.array(nz) vertical block widths (m),
                      positive downwards.
        **save_path** : directory; the file is written to
                        save_path/model_fn_basename.
        **model_fn_basename** : output file name,
                                *default* is ModEM_Model.ws
        **title** : first-line comment of the file.
        **res_model** : np.array((nx,ny,nz)) prior resistivity model,
                        or a scalar used as a half-space value.
        **res_starting_value** : half-space resistivity in Ohm-m,
                                 *default* is 100.
        **res_scale** : [ 'loge' | 'log' | 'log10' | 'linear' ],
                        *default* is 'loge'.

    :raises ValueError: if res_scale is not one of the supported values.
    """
    for key in kwargs.keys():
        setattr(self, key, kwargs[key])

    # --> resolve the output file name
    if self.save_path is not None:
        self.model_fn = os.path.join(self.save_path,
                                     self.model_fn_basename)
    if self.model_fn is None:
        if self.save_path is None:
            self.save_path = os.getcwd()
            self.model_fn = os.path.join(self.save_path,
                                         self.model_fn_basename)
        elif os.path.isdir(self.save_path) == True:
            self.model_fn = os.path.join(self.save_path,
                                         self.model_fn_basename)
        else:
            self.save_path = os.path.dirname(self.save_path)
            self.model_fn = self.save_path

    # --> get resistivity model; a scalar res_model means a half space
    if self.res_model is None:
        self.res_model = np.zeros((self.nodes_north.size,
                                   self.nodes_east.size,
                                   self.nodes_z.size))
        self.res_model[:, :, :] = self.res_starting_value
    elif type(self.res_model) in [float, int]:
        self.res_starting_value = self.res_model
        self.res_model = np.zeros((self.nodes_north.size,
                                   self.nodes_east.size,
                                   self.nodes_z.size))
        self.res_model[:, :, :] = self.res_starting_value

    # resolve the scale conversion BEFORE opening the file so an
    # unknown res_scale raises cleanly instead of the NameError the
    # old code produced after the header was already written
    if self.res_scale.lower() == 'loge':
        write_res_model = np.log(self.res_model[::-1, :, :])
    elif self.res_scale.lower() in ('log', 'log10'):
        write_res_model = np.log10(self.res_model[::-1, :, :])
    elif self.res_scale.lower() == 'linear':
        write_res_model = self.res_model[::-1, :, :]
    else:
        raise ValueError(
            'res_scale {0} is not supported, use loge, log, log10 or linear'.format(self.res_scale))

    if self.grid_center is None:
        # compute grid center: lower-left corner offset from center
        center_east = -self.nodes_east.__abs__().sum()/2
        center_north = -self.nodes_north.__abs__().sum()/2
        center_z = 0
        self.grid_center = np.array([center_north, center_east, center_z])

    # --> write file; open() (not the Python-2-only file()) inside a
    # context manager so the handle is closed even if a write fails
    with open(self.model_fn, 'w') as ifid:
        ifid.write('# {0}\n'.format(self.title.upper()))
        ifid.write('{0:>5}{1:>5}{2:>5}{3:>5} {4}\n'.format(self.nodes_north.size,
                                                           self.nodes_east.size,
                                                           self.nodes_z.size,
                                                           0,
                                                           self.res_scale.upper()))

        # write S --> N node block
        for ii, nnode in enumerate(self.nodes_north):
            ifid.write('{0:>12.3f}'.format(abs(nnode)))
        ifid.write('\n')

        # write W --> E node block
        for jj, enode in enumerate(self.nodes_east):
            ifid.write('{0:>12.3f}'.format(abs(enode)))
        ifid.write('\n')

        # write top --> bottom node block
        for kk, zz in enumerate(self.nodes_z):
            ifid.write('{0:>12.3f}'.format(abs(zz)))
        ifid.write('\n')

        # write out the layers from res_model; N-S axis was reversed
        # above so the first row written is the southernmost
        for zz in range(self.nodes_z.size):
            ifid.write('\n')
            for ee in range(self.nodes_east.size):
                for nn in range(self.nodes_north.size):
                    ifid.write('{0:>13.5E}'.format(write_res_model[nn, ee, zz]))
                ifid.write('\n')

        ifid.write('\n{0:>16.3f}{1:>16.3f}{2:>16.3f}\n'.format(self.grid_center[0],
                                                               self.grid_center[1],
                                                               self.grid_center[2]))

        if self.mesh_rotation_angle is None:
            ifid.write('{0:>9.3f}\n'.format(0))
        else:
            ifid.write('{0:>9.3f}\n'.format(self.mesh_rotation_angle))

    print('Wrote file to: {0}'.format(self.model_fn))
def read_model_file(self, model_fn=None, shift_grid=False):
    """
    Read an initial (model) file and return the pertinent information
    including grid positions in coordinates relative to the center
    point (0, 0) and the starting model.

    Note the block layout of the file:

        ModEM:               WS:
        ----------           -----
        0-----> N_north      0-------->N_east
        |                    |
        |                    |
        V                    V
        N_east               N_north

    Arguments:
    ----------
        **model_fn** : full path to the initializing file.

    Sets (on self):
        nodes_north/nodes_east/nodes_z, res_model (linear Ohm-m),
        grid_center, rotation_angle, cell_size_east/north,
        pad_east/pad_north, title, save_path.

    :raises ModEMError: if no file name is given or the file is missing.
    """
    if model_fn is not None:
        self.model_fn = model_fn

    if self.model_fn is None:
        raise ModEMError('model_fn is None, input a model file name')

    # os.path.isfile returns a bool, never None -- the old
    # "is None" comparison could never fire
    if not os.path.isfile(self.model_fn):
        raise ModEMError('Cannot find {0}, check path'.format(self.model_fn))

    self.save_path = os.path.dirname(self.model_fn)

    # open() instead of the Python-2-only file() builtin
    with open(self.model_fn, 'r') as ifid:
        ilines = ifid.readlines()

    self.title = ilines[0].strip()

    # get size of dimensions, remembering that x is N-S, y is E-W, z is + down
    nsize = ilines[1].strip().split()
    n_north = int(nsize[0])
    n_east = int(nsize[1])
    n_z = int(nsize[2])
    log_yn = nsize[4]

    # get nodes (builtin float; np.float is removed in modern numpy)
    self.nodes_north = np.array([float(nn)
                                 for nn in ilines[2].strip().split()])
    self.nodes_east = np.array([float(nn)
                                for nn in ilines[3].strip().split()])
    self.nodes_z = np.array([float(nn)
                             for nn in ilines[4].strip().split()])

    self.res_model = np.zeros((n_north, n_east, n_z))

    # get model
    count_z = 0
    line_index = 6
    count_e = 0
    while count_z < n_z:
        iline = ilines[line_index].strip().split()
        # blank lines split the depth blocks, use those as a marker to
        # set the layer number and start a new block
        if len(iline) == 0:
            count_z += 1
            count_e = 0
            line_index += 1
        # each line in the block is a line of N-->S values for an east value
        else:
            north_line = np.array([float(nres) for nres in
                                   ilines[line_index].strip().split()])
            # reverse so that the first index is the furthest south
            self.res_model[:, count_e, count_z] = north_line[::-1]
            count_e += 1
            line_index += 1

    # --> get grid center and rotation angle
    if len(ilines) > line_index:
        for iline in ilines[line_index:]:
            ilist = iline.strip().split()
            # grid center is a 3-value line
            if len(ilist) == 3:
                self.grid_center = np.array(ilist, dtype=float)
            # rotation angle is a 1-value line
            elif len(ilist) == 1:
                self.rotation_angle = float(ilist[0])
            else:
                pass

    # --> make sure the resistivity units are in linear Ohm-m
    if log_yn.lower() == 'loge':
        self.res_model = np.e**self.res_model
    elif log_yn.lower() == 'log' or log_yn.lower() == 'log10':
        self.res_model = 10**self.res_model

    # center the grids
    if self.grid_center is None:
        self.grid_center = np.array([-self.nodes_north.sum()/2,
                                     -self.nodes_east.sum()/2,
                                     0.0])

    # need to shift the grid if the center is not symmetric
    shift_north = self.grid_center[0]+self.nodes_north.sum()/2
    shift_east = self.grid_center[1]+self.nodes_east.sum()/2

    # shift the grid. if shift is + then that means the center is
    self.grid_north += shift_north
    self.grid_east += shift_east

    # get cell size; atleast_1d keeps this working on both old scipy
    # (array-valued ModeResult) and new scipy (scalar ModeResult)
    self.cell_size_east = float(np.atleast_1d(stats.mode(self.nodes_east)[0])[0])
    self.cell_size_north = float(np.atleast_1d(stats.mode(self.nodes_north)[0])[0])

    # get number of padding cells; pad_north is the attribute name read
    # elsewhere (e.g. make_mesh_from_center); keep north_pad as an alias
    # for backward compatibility with the old attribute name
    self.pad_east = np.where(self.nodes_east[0:int(self.nodes_east.size/2)]
                             != self.cell_size_east)[0][-1]
    self.pad_north = np.where(self.nodes_north[0:int(self.nodes_north.size/2)]
                              != self.cell_size_north)[0][-1]
    self.north_pad = self.pad_north
def read_ws_model_file(self, ws_model_fn):
    """
    Read in a WS3INV3D model file and copy every attribute whose name
    matches an attribute of this object, then compute the grid center.

    :param ws_model_fn: full path to the WS3INV3D model file.
    """
    ws_model_obj = ws.WSModel(ws_model_fn)
    ws_model_obj.read_model_file()

    # set similar attributes
    for ws_key in ws_model_obj.__dict__.keys():
        for md_key in self.__dict__.keys():
            if ws_key == md_key:
                setattr(self, ws_key, ws_model_obj.__dict__[ws_key])

    # compute grid center: lower-left corner offset from the center
    # (fixed: was self.nodes_norths -- an AttributeError at runtime)
    center_east = -self.nodes_east.__abs__().sum()/2
    center_north = -self.nodes_north.__abs__().sum()/2
    center_z = 0
    self.grid_center = np.array([center_north, center_east, center_z])
def write_vtk_file(self, vtk_save_path=None,
                   vtk_fn_basename='ModEM_model_res'):
    """
    Write a vtk file of the resistivity model to view in Paraview
    or other VTK viewers.

    Arguments:
    -------------
        **vtk_save_path** : string
                            directory to save vtk file to.
                            *default* is Model.save_path

        **vtk_fn_basename** : string
                              filename basename of vtk file; evtk will
                              add on the extension .vtr
                              *default* is ModEM_model_res
    """
    if vtk_save_path is None:
        vtk_fn = os.path.join(self.save_path, vtk_fn_basename)
    else:
        vtk_fn = os.path.join(vtk_save_path, vtk_fn_basename)

    # use cellData, this makes the grid properly as grid is n+1
    gridToVTK(vtk_fn,
              self.grid_north/1000.,
              self.grid_east/1000.,
              self.grid_z/1000.,
              cellData={'resistivity':self.res_model})

    # parenthesized prints work as both py2 statements and py3 calls
    print('-'*50)
    print('--> Wrote model file to {0}\n'.format(vtk_fn))
    print('='*26)
    print(' model dimensions = {0}'.format(self.res_model.shape))
    print(' * north {0}'.format(self.nodes_north.size))
    print(' * east {0}'.format(self.nodes_east.size))
    print(' * depth {0}'.format(self.nodes_z.size))
    print('='*26)
def get_parameters(self):
    """
    Collect the important model parameters for later documentation.

    Returns a dictionary keyed 'model.<parameter>' for each mesh
    attribute, plus 'model.size' holding the resistivity model shape.
    """
    attr_names = ('cell_size_east',
                  'cell_size_north',
                  'ew_ext',
                  'ns_ext',
                  'pad_east',
                  'pad_north',
                  'pad_z',
                  'pad_num',
                  'z1_layer',
                  'z_target_depth',
                  'z_bottom',
                  'mesh_rotation_angle',
                  'res_starting_value',
                  'save_path')

    # namespace every key under 'model.' so entries from other
    # objects can share one documentation dictionary
    parameter_dict = {'model.{0}'.format(name): getattr(self, name)
                      for name in attr_names}
    parameter_dict['model.size'] = self.res_model.shape

    return parameter_dict
#--> read in ascii dem file
def read_dem_ascii(self, ascii_fn, cell_size=500, model_center=(0, 0),
                   rot_90=0, dem_rotation_angle=0):
    """
    Read in a dem in ascii (Arc-style) format.

    The ascii format is assumed to be:
        ncols         3601
        nrows         3601
        xllcorner     -119.00013888889
        yllcorner     36.999861111111
        cellsize      0.00027777777777778
        NODATA_value  -9999
        <elevation rows, W --> E, first row is the northernmost>

    Returns (new_east, new_north, elevation): easting/northing arrays
    in meters, resampled to roughly cell_size meters and shifted so
    model_center maps to (0, 0), plus the matching elevation array.
    """
    d_dict = {}
    # open() instead of the Python-2-only file(); the context manager
    # guarantees the handle is closed even on a parse error
    with open(ascii_fn, 'r') as dfid:
        # six header lines: key value pairs
        for ii in range(6):
            dline = dfid.readline()
            dline = dline.strip().split()
            key = dline[0].strip().lower()
            value = float(dline[1].strip())
            d_dict[key] = value

        x0 = d_dict['xllcorner']
        y0 = d_dict['yllcorner']
        nx = int(d_dict['ncols'])
        ny = int(d_dict['nrows'])
        cs = d_dict['cellsize']

        # read in the elevation data
        elevation = np.zeros((nx, ny))

        for ii in range(1, int(ny)+2):
            dline = dfid.readline()
            if len(str(dline)) > 1:
                # needs to be backwards because the first line read is
                # the furthest north row
                elevation[:, -ii] = np.array(dline.strip().split(' '), dtype='float')
            else:
                break

    # create lat and lon arrays from the dem file
    lon = np.arange(x0, x0+cs*(nx), cs)
    lat = np.arange(y0, y0+cs*(ny), cs)

    # calculate the lower left and upper right corners of the grid in meters
    ll_en = gis_tools.project_point_ll2utm(lat[0], lon[0])
    ur_en = gis_tools.project_point_ll2utm(lat[-1], lon[-1])

    # estimate cell sizes for each dem measurement
    d_east = abs(ll_en[0]-ur_en[0])/nx
    d_north = abs(ll_en[1]-ur_en[1])/ny

    # calculate the number of new cells according to the given cell size
    # if the given cell size and cs are similar int could make the value 0,
    # hence the need to make it one if it is 0.
    num_cells = max([1, int(cell_size/np.mean([d_east, d_north]))])

    # make easting and northing arrays in meters corresponding to lat and lon
    east = np.arange(ll_en[0], ur_en[0], d_east)
    north = np.arange(ll_en[1], ur_en[1], d_north)

    # resample the data accordingly
    new_east = east[np.arange(0, east.size, num_cells)]
    new_north = north[np.arange(0, north.size, num_cells)]
    new_x, new_y = np.meshgrid(np.arange(0, east.size, num_cells),
                               np.arange(0, north.size, num_cells),
                               indexing='ij')
    elevation = elevation[new_x, new_y]

    # make any null values set to minimum elevation, could be dangerous
    elevation[np.where(elevation == -9999.0)] = elevation[np.where(elevation != -9999.0)].min()

    # estimate the shift of the DEM to relative model coordinates
    mid_east = np.where(new_east >= model_center[0])[0][0]
    mid_north = np.where(new_north >= model_center[1])[0][0]
    new_east -= new_east[mid_east]
    new_north -= new_north[mid_north]

    # need to rotate cause I think I wrote the dem backwards;
    # the original if/else branches were identical, so a single call
    # preserves behavior for every rot_90 value
    elevation = np.rot90(elevation, rot_90)

    if dem_rotation_angle != 0.0:
        cos_ang = np.cos(np.deg2rad(dem_rotation_angle))
        sin_ang = np.sin(np.deg2rad(dem_rotation_angle))
        # NOTE(review): np.matrix is deprecated upstream; kept here
        # because np.dot on a matrix returns a matrix and downstream
        # indexing depends on that behavior -- confirm before changing
        rot_matrix = np.matrix(np.array([[cos_ang, sin_ang],
                                         [-sin_ang, cos_ang]]))
        new_coords = np.dot(rot_matrix, np.array([new_east, new_north]))

        new_east = new_coords[0]
        new_north = new_coords[1]

    return new_east, new_north, elevation
def interpolate_elevation(self, elev_east, elev_north, elevation,
                          model_east, model_north, pad=3,
                          elevation_max=None):
    """
    Interpolate an elevation model onto the resistivity model grid.

    Arguments:
    ---------------
        **elev_east** : np.ndarray(num_east_nodes)
                        easting grid for elevation model
        **elev_north** : np.ndarray(num_north_nodes)
                         northing grid for elevation model
        **elevation** : np.ndarray(num_east_nodes, num_north_nodes)
                        elevation model, x is east, y is north, meters.
                        NOTE: clipped in place when elevation_max is set.
        **model_east** : np.ndarray(num_east_nodes_model)
                         relative easting grid of resistivity model
        **model_north** : np.ndarray(num_north_nodes_model)
                          relative northing grid of resistivity model
        **pad** : int
                  number of outer cells filled by repeating the adjacent
                  interior cell, to cover areas the DEM does not reach
        **elevation_max** : float
                            ceiling applied to the elevation values;
                            *default* is None (use data as-is)

    Returns:
    --------------
        **interp_elev** : np.ndarray(num_north_nodes_model,
                          num_east_nodes_model)
                          elevation interpolated onto the model grid
                          (transposed so x=N, y=E)
    """
    # clip singular high points in place when a ceiling is given
    # (type() check kept deliberately: isinstance would also accept
    # bool, changing which inputs trigger the clip)
    if type(elevation_max) in (float, int):
        elevation[np.where(elevation > float(elevation_max))] = elevation_max

    # full 2-D coordinate grids matching the elevation sample layout
    sample_east, sample_north = np.broadcast_arrays(elev_east[:, None],
                                                    elev_north[None, :])

    # linear interpolation onto the model grid; points outside the DEM
    # fall back to the mean elevation
    interp_elev = spi.griddata((sample_east.ravel(), sample_north.ravel()),
                               elevation.ravel(),
                               (model_east[:, None], model_north[None, :]),
                               method='linear',
                               fill_value=elevation.mean())

    # extend the edges outward: copy the first/last interior row across
    # the top and bottom pad bands ...
    interp_elev[:pad, pad:-pad] = interp_elev[pad, pad:-pad]
    interp_elev[-pad:, pad:-pad] = interp_elev[-pad - 1, pad:-pad]
    # ... and repeat the first/last interior column across the side bands
    west_band = interp_elev[:, pad].repeat(pad)
    interp_elev[:, :pad] = west_band.reshape(interp_elev[:, :pad].shape)
    east_band = interp_elev[:, -pad - 1].repeat(pad)
    interp_elev[:, -pad:] = east_band.reshape(interp_elev[:, -pad:].shape)

    # transpose so the result is aligned with x=N, y=E
    return interp_elev.T
def make_elevation_model(self, interp_elev, model_nodes_z,
                         elevation_cell=30, pad=3, res_air=1e12,
                         fill_res=100, res_sea=0.3):
    """
    Map interpolated elevation onto the resistivity model by stacking
    elevation cells on top of the existing vertical nodes.

    ..Note: if there are large elevation gains, the elevation cell size
            might need to be increased.

    Arguments:
    -------------
        **interp_elev** : np.ndarray(num_nodes_north, num_nodes_east)
                          elevation interpolated onto the model grid (m).
                          NOTE: clipped in place to the survey-area max.
        **model_nodes_z** : np.ndarray(num_z_nodes)
                            vertical node thicknesses (not grid depths)
                            of the model without topography, meters.
        **elevation_cell** : float
                             height of each added elevation cell (m);
                             the same at all elevations.
        **pad** : int
                  number of outer cells excluded when finding the
                  min/max elevation (set to the model padding count to
                  consider only the survey area).
        **res_air** : float, resistivity of air, default 1E12 Ohm-m.
        **fill_res** : float, subsurface resistivity in Ohm-m.
        **res_sea** : float, sea water resistivity in Ohm-m.

    Returns:
    -------------
        **elevation_model** : np.ndarray(num_north, num_east,
                              num_elev_nodes + num_z_nodes)
                              model with res_air above the surface,
                              res_sea in ocean cells, fill_res below.
        **new_nodes_z** : np.ndarray(num_z_nodes + num_elev_nodes)
                          new vertical nodes; any node thinner than
                          elevation_cell is raised to elevation_cell.
    """
    # max elevation within the survey (non-padding) area
    elev_max = interp_elev[pad:-pad, pad:-pad].max()

    # need to set sea level to 0 elevation
    elev_min = max([0, interp_elev[pad:-pad, pad:-pad].min()])

    # clip the interpolated elevations to the survey-area max (in place)
    interp_elev[np.where(interp_elev > elev_max)] = elev_max
    #interp_elev[np.where(interp_elev < elev_min)] = elev_min

    # calculate the number of elevation cells needed
    num_elev_cells = int((elev_max-elev_min)/elevation_cell)
    print('Number of elevation cells: {0}'.format(num_elev_cells))

    # find sea level if it is there
    # NOTE(review): elev_min is forced >= 0 above, so this branch looks
    # unreachable unless the commented-out lower clip is restored
    if elev_min < 0:
        sea_level_index = num_elev_cells-abs(int((elev_min)/elevation_cell))-1
    else:
        sea_level_index = num_elev_cells-1
    print('Sea level index is {0}'.format(sea_level_index))

    # make an array of just the elevation for the model
    # north is first index, east is second, vertical is third
    elevation_model = np.ones((interp_elev.shape[0],
                               interp_elev.shape[1],
                               num_elev_cells+model_nodes_z.shape[0]))
    elevation_model[:, :, :] = fill_res

    # fill in elevation model with air values. Remember Z is positive
    # down, so the top of the model is the highest point and index 0 is
    # the highest elevation
    for nn in range(interp_elev.shape[0]):
        for ee in range(interp_elev.shape[1]):
            # need to test for ocean
            if interp_elev[nn, ee] < 0:
                # air above sea level, then sea water down to the seabed
                elevation_model[nn, ee, 0:sea_level_index] = res_air
                dz = sea_level_index+abs(int((interp_elev[nn, ee])/elevation_cell))+1
                elevation_model[nn, ee, sea_level_index:dz] = res_sea
            else:
                # air from the top of the model down to the surface
                dz = int((elev_max-interp_elev[nn, ee])/elevation_cell)
                elevation_model[nn, ee, 0:dz] = res_air

    # make new z nodes array
    new_nodes_z = np.append(np.repeat(elevation_cell, num_elev_cells),
                            model_nodes_z)
    new_nodes_z[np.where(new_nodes_z < elevation_cell)] = elevation_cell

    return elevation_model, new_nodes_z
def add_topography_to_model(self, dem_ascii_fn, write_file=True,
                            model_center=(0, 0), rot_90=0,
                            dem_rotation_angle=0, cell_size=500,
                            elev_cell=30, pad=1, elev_max=None):
    """
    Add topography to an existing model from a dem in ascii format.

    The ascii format is assumed to be:
    ncols         3601
    nrows         3601
    xllcorner     -119.00013888889
    yllcorner     36.999861111111
    cellsize      0.00027777777777778
    NODATA_value  -9999
    elevation data W --> E
    N
    |
    V
    S

    Arguments
    -------------
        **dem_ascii_fn** : string
                           full path to ascii dem file

        **write_file** : bool
                         if True, write a '<model>_topo.rho' model file
                         next to the existing model file.
                         *default* is True

        **model_center** : (east, north) in meters
                           Sometimes the center of the DEM and the center
                           of the model don't line up.  Use this parameter
                           to line everything up properly.

        **rot_90** : [ 0 | 1 | 2 | 3 ]
                     rotate the elevation model by rot_90*90 degrees.
                     Sometimes the elevation model is flipped depending
                     on your coordinate system.

        **dem_rotation_angle** : float (degrees from North)
                                 rotation angle to rotate station locations

        **cell_size** : float (meters)
                        horizontal cell size of grid to interpolate
                        elevation onto.  This should be smaller or equal
                        to the input model cell size to be sure there is
                        no spatial aliasing

        **elev_cell** : float (meters)
                        vertical size of each elevation cell.  This value
                        should be about 1/10th the smallest skin depth.

        **pad** : int
                  number of edge cells to exclude when scaling the
                  interpolated elevations (passed to
                  interpolate_elevation). *default* is 1

        **elev_max** : float (meters) or None
                       optional cap on the interpolated elevation (passed
                       to interpolate_elevation as elevation_max).
                       *default* is None

    Returns
    ---------------
        **model_fn** : string
                       full path to model file that contains topography
    """
    ### 1.) read in the dem and center it onto the resistivity model
    e_east, e_north, elevation = self.read_dem_ascii(dem_ascii_fn,
                                                     cell_size=cell_size,
                                                     model_center=model_center,
                                                     rot_90=rot_90,
                                                     dem_rotation_angle=dem_rotation_angle)

    ### 2.) interpolate the elevation model onto the model grid
    m_elev = self.interpolate_elevation(e_east, e_north, elevation,
                                        self.grid_east, self.grid_north,
                                        pad=pad, elevation_max=elev_max)
    # replace NODATA (-9999) cells with the minimum valid elevation so
    # they do not produce spurious air/sea cells in the elevation model
    m_elev[np.where(m_elev == -9999.0)] = m_elev[np.where(m_elev != -9999.0)].min()

    ### 3.) make a resistivity model that incorporates topography
    mod_elev, elev_nodes_z = self.make_elevation_model(m_elev,
                                                       self.nodes_z,
                                                       elevation_cell=elev_cell)

    ### 4.) write new model file
    self.nodes_z = elev_nodes_z
    self.res_model = mod_elev

    if write_file:
        self.save_path = os.path.dirname(self.model_fn)
        self.write_model_file(model_fn_basename='{0}_topo.rho'.format(
            os.path.basename(self.model_fn)[0:-4]))
    return self.model_fn
def assign_resistivity_from_surfacedata(self, surfacename, resistivity_value,
                                        where='above', sea_resistivity=0.3):
    """
    Assign a resistivity value to all model cells above or below a surface.

    Requires the surface_dict attribute to exist and contain data for the
    surface key (can get this information from an ascii file using
    project_surface).

    **inputs**
    surfacename = name of surface (must correspond to key in surface_dict)
    resistivity_value = value to assign
    where = 'above' or 'below' - assign resistivity above or below the
            surface
    sea_resistivity = resistivity (Ohm-m) assigned to cells below sea
                      level but above the surface when
                      surfacename == 'topography'. *default* is 0.3
                      (previously hard-coded)
    """
    # FZ: should re-define the self.res_model if its shape has changed after topo air layers are added
    # cell-centre depths of the vertical grid (positive down)
    gcz = np.mean([self.grid_z[:-1], self.grid_z[1:]], axis=0)

    # convert to positive down, relative to the top of the grid
    surfacedata = - self.surface_dict[surfacename]

    # define topography, so that we don't overwrite cells above topography
    # first check if topography exists
    if 'topography' in self.surface_dict.keys():
        # second, check topography isn't the surface we're trying to assign
        # resistivity for
        if surfacename == 'topography':
            # if it is, we need to define the upper limit as the highest point in the surface
            top = np.zeros_like(surfacedata) + np.amin(surfacedata) - 1.
        else:
            # if not, upper limit of resistivity assignment is the topography, note positive downwards
            top = -self.surface_dict['topography']
    # if no topography, use top of model
    else:
        top = self.grid_z[0] + np.zeros_like(surfacedata)

    # assign resistivity value cell column by cell column
    for j in range(len(self.res_model)):
        for i in range(len(self.res_model[j])):
            if where == 'above':
                # needs to be above the surface but below the top (as defined before)
                ii = np.where((gcz <= surfacedata[j, i]) & (gcz > top[j, i]))[0]
            else:  # for below the surface
                ii = np.where(gcz > surfacedata[j, i])[0]
            self.res_model[j, i, ii] = resistivity_value
            if surfacename == 'topography':
                # cells below sea level but above the topography are sea water
                iisea = np.where((gcz <= surfacedata[j, i]) & (gcz > 0.))[0]
                self.res_model[j, i, iisea] = sea_resistivity
                # same visible output as the old py2 `print j,i,ii`
                print('{0} {1} {2}'.format(j, i, ii))
def interpolate_elevation2(self, surfacefile=None, surface=None, surfacename=None,
                           method='nearest'):
    """
    Project a surface onto the model grid and store the resulting
    elevation data in a dictionary called surface_dict.  The surface is
    assumed to be in lat/long coordinates (wgs84).

    **returns**
    nothing returned, but surface data are added to surface_dict under
    the key given by surfacename.

    **inputs**
    choose to provide either surface_file (path to file) or surface
    (tuple).  If both are provided then the surface tuple takes priority.

    surface elevations are positive up, and relative to sea level.
    surface file format is:

    ncols         3601
    nrows         3601
    xllcorner     -119.00013888889 (longitude of lower left)
    yllcorner     36.999861111111  (latitude of lower left)
    cellsize      0.00027777777777778
    NODATA_value  -9999
    elevation data W --> E
    N
    |
    V
    S

    Alternatively, provide a tuple (lon, lat, elevation) where elevation
    is a 2D array (shape (ny, nx), ordered S -> N, W -> E) and lon, lat
    are either 1D arrays (regular grid) or 2D arrays matching the
    elevation array.

    other inputs:
    surfacename = key to file the surface under in the dictionary
    method = interpolation method, 'nearest' by default; use 'linear' or
             'cubic' when the model grid is dense compared to the surface
             points
    """
    # make sure the container for interpolated surfaces exists
    if not hasattr(self, 'surface_dict'):
        self.surface_dict = {}

    # read the surface data in from ascii if surface not provided
    if surface is None:
        surface = mtfh.read_surface_ascii(surfacefile)
    lon, lat, elev = surface

    # a 1D lat/lon listing describes a regular grid; expand it to 2D points
    if len(lon.shape) == 1:
        lon, lat = np.meshgrid(lon, lat)

    # project the surface points into the model's UTM coordinate system
    proj_east, proj_north, utm_zone = gis_tools.project_points_ll2utm(
        lat, lon,
        epsg=self.station_locations.model_epsg,
        utm_zone=self.station_locations.model_utm_zone
    )

    # centre position of the model grid in real-world coordinates
    east0 = np.median(self.station_locations.station_locations['east'] -
                      self.station_locations.station_locations['rel_east'])
    north0 = np.median(self.station_locations.station_locations['north'] -
                       self.station_locations.station_locations['rel_north'])

    # cell-centre coordinates of the model grid in real-world coordinates
    shifted_east = self.grid_east + east0
    shifted_north = self.grid_north + north0
    cell_east = np.mean([shifted_east[1:], shifted_east[:-1]], axis=0)
    cell_north = np.mean([shifted_north[1:], shifted_north[:-1]], axis=0)

    # scattered surface points and their elevations
    src_points = np.vstack([proj_east.flatten(), proj_north.flatten()]).T
    src_values = elev.flatten()

    # model grid cell centres to interpolate onto
    mesh_e, mesh_n = np.meshgrid(cell_east, cell_north)
    targets = np.vstack([mesh_e.flatten(), mesh_n.flatten()]).T

    # elevation on the centre of the grid nodes
    elev_mg = spi.griddata(src_points, src_values, targets,
                           method=method).reshape(len(cell_north), len(cell_east))

    print(" Elevation data type and shape *** ", type(elev_mg), elev_mg.shape,
          len(cell_north), len(cell_east))
    # <type 'numpy.ndarray'> (65, 92): a 2D image with cell index as pixels

    # work out a key to file the surface under
    if surfacename is None:
        if surfacefile is not None:
            surfacename = os.path.basename(surfacefile)
        else:
            counter = 1
            surfacename = 'surface%01i' % counter
            while surfacename in self.surface_dict:
                counter += 1
                surfacename = 'surface%01i' % counter

    # add surface to the dictionary of surface elevation data
    self.surface_dict[surfacename] = elev_mg
    return
def add_topography_to_model2(self, topographyfile=None, topographyarray=None, interp_method='nearest',
                             air_resistivity=1e12, sea_resistivity=0.3, airlayer_cellsize=None):
    """
    Add topography (air layers) to the model.

    If n_airlayers is non-zero: read in the topography file (or use the
    provided array), make a surface model, prepend log-increasing air
    layers to the vertical grid, and set cells above the topographic
    surface to air_resistivity.  If n_airlayers is zero no air/topography
    can be added; only bathymetry handling applies.

    Arguments
    -------------
        **topographyfile** : string
                             full path to an ascii DEM file, interpolated
                             via interpolate_elevation2
        **topographyarray** : 2D array of elevations on the model grid;
                              takes priority over topographyfile
        **interp_method** : interpolation method for the DEM
        **air_resistivity** : resistivity assigned to air cells
        **sea_resistivity** : resistivity of sea water (currently only
                              used by the commented-out bathymetry logic,
                              kept for interface compatibility)
        **airlayer_cellsize** : unused, kept for interface compatibility
    """
    # first, get surface data
    if topographyfile is not None:
        self.interpolate_elevation2(surfacefile=topographyfile,
                                    surfacename='topography',
                                    method=interp_method)
    if topographyarray is not None:
        self.surface_dict['topography'] = topographyarray

    if self.n_airlayers is None or self.n_airlayers == 0:
        print("No air layers specified, so will not add air/topography !!!")
        print("Only bathymetry will be added below according to the topofile: sea-water low resistivity!!!")

    elif self.n_airlayers > 0:  # FZ: new logic, add equal blocksize air layers on top of the simple flat-earth grid
        # restrict topography to cells within a buffer of the stations;
        # the buffer is 5x the horizontal cell diagonal.
        # BUG FIX: 'east' term was cell_size_east*2, should be squared to
        # match the north term (Euclidean diagonal)
        core_cells = mtmesh.get_station_buffer(
            self.grid_east,
            self.grid_north,
            self.station_locations.station_locations['rel_east'],
            self.station_locations.station_locations['rel_north'],
            buf=5 * (self.cell_size_east**2 + self.cell_size_north**2)**0.5)
        topo_core = self.surface_dict['topography'][core_cells]

        # log increasing air layers, in reversed order
        new_air_nodes = mtmesh.make_log_increasing_array(self.z1_layer,
                                                         topo_core.max() - topo_core.min(),
                                                         self.n_airlayers + 1,
                                                         increment_factor=0.999)[::-1]
        # cumulative sum to get grid cell locations
        new_airlayers = np.array([new_air_nodes[:ii].sum()
                                  for ii in range(len(new_air_nodes) + 1)])
        # round to nearest whole number and shift so the highest point is at zero
        new_airlayers = np.around(new_airlayers - topo_core.max())
        print("new_airlayers", new_airlayers)
        print("self.grid_z[0:2]", self.grid_z[0:2])

        # prepend the new air layers to the vertical grid
        self.grid_z = np.concatenate([new_airlayers[:-1],
                                      self.grid_z + new_airlayers[-1]], axis=0)

        # update the z-centre as the top air layer
        self.grid_center[2] = self.grid_z[0]

        # grow the resistivity model to cover the new air layers
        new_res_model = np.ones((self.nodes_north.size,
                                 self.nodes_east.size,
                                 self.nodes_z.size)) * self.res_starting_value
        new_res_model[:, :, self.n_airlayers + 1:] = self.res_model
        self.res_model = new_res_model

        # flag everything above the topographic surface as air
        self.assign_resistivity_from_surfacedata('topography', air_resistivity, where='above')
    return
#==============================================================================
# Residuals
#==============================================================================
class Residual():
    """
    class to contain residuals for each data point, and rms values for each
    station

    ====================== ====================================================
    Attributes/Key Words   Description
    ====================== ====================================================
    center_position_EN     (east, north, elev) for center point of station
                           array.  All stations are relative to this location
                           for plotting purposes.
    rms_array              numpy.ndarray structured to store station
                           location values and rms.  Keys are:
                               * station --> station name
                               * east --> UTM east (m)
                               * north --> UTM north (m)
                               * lat --> latitude in decimal degrees
                               * lon --> longitude in decimal degrees
                               * elev --> elevation (m)
                               * zone --> UTM zone
                               * rel_east -- > relative east location to
                                               center_position (m)
                               * rel_north --> relative north location to
                                               center_position (m)
                               * rms --> root-mean-square residual for each
                                         station
    residual_array         numpy.ndarray (num_stations) structured to store
                           data.  keys are:
                               * station --> station name
                               * lat --> latitude in decimal degrees
                               * lon --> longitude in decimal degrees
                               * elev --> elevation (m)
                               * rel_east -- > relative east location to
                                               center_position (m)
                               * rel_north --> relative north location to
                                               center_position (m)
                               * east --> UTM east (m)
                               * north --> UTM north (m)
                               * zone --> UTM zone
                               * z --> impedance tensor residual (measured - modelled)
                                       (num_freq, 2, 2)
                               * z_err --> impedance tensor error array with
                                       shape (num_freq, 2, 2)
                               * tip --> Tipper residual (measured - modelled)
                                       (num_freq, 1, 2)
                               * tipperr --> Tipper array with shape
                                       (num_freq, 1, 2)
    residual_fn            full path to data file
    data_period_list       period list from all the data
    fn_basename            basename of residual file
    header_strings         strings for header of data file following the format
                           outlined in the ModEM documentation
    inv_comp_dict          dictionary of inversion componets
    inv_mode               inversion mode, options are: *default* is '1'
                               * '1' --> for 'Full_Impedance' and
                                         'Full_Vertical_Components'
                               * '2' --> 'Full_Impedance'
                               * '3' --> 'Off_Diagonal_Impedance' and
                                         'Full_Vertical_Components'
                               * '4' --> 'Off_Diagonal_Impedance'
                               * '5' --> 'Full_Vertical_Components'
                               * '6' --> 'Full_Interstation_TF'
                               * '7' --> 'Off_Diagonal_Rho_Phase'
    inv_mode_dict          dictionary for inversion modes
    mt_dict                dictionary of mtpy.core.mt.MT objects with keys
                           being station names
    units                  [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z
                           *default* is [mV/km]/[nT]
    wave_sign              [ + | - ] sign of time dependent wave.
                           *default* is '+' as positive downwards.
    ====================== ====================================================
    """

    def __init__(self, **kwargs):
        self.workdir = kwargs.pop('workdir', '.')
        self.residual_fn = kwargs.pop('residual_fn', None)
        # populated by read_residual_file(); initialised here so get_rms()
        # can safely test for them (previously raised AttributeError)
        self.residual_array = None
        self.rms_array = None

    def read_residual_file(self, residual_fn=None):
        """
        Read a residual file (ModEM data-file format) and populate
        residual_array and rms_array.  Uses self.residual_fn when no
        residual_fn argument is given.
        """
        if residual_fn is not None:
            self.residual_fn = residual_fn
        if self.residual_fn is None:
            print("Cannot read residuals, please provide residual_fn")
            return
        resObj = Data()
        resObj.read_data_file(self.residual_fn)

        # pass relevant arguments through residual object
        for att in ['center_position_EN', 'data_period_list',
                    'wave_sign_impedance', 'wave_sign_tipper']:
            if hasattr(resObj, att):
                setattr(self, att, getattr(resObj, att))

        # define new data types for residual arrays by copying/modifying dtype from data object
        self.residual_array = resObj.data_array.copy()

        # append some new fields to contain rms values
        self.rms_array = resObj.station_locations.copy()
        for fieldname in ['rms', 'rms_z', 'rms_tip']:
            self.rms_array = recfunctions.append_fields(self.rms_array.copy(),
                                                        fieldname,
                                                        np.zeros(len(resObj.station_locations)),
                                                        usemask=False)

    def get_rms(self, residual_fn=None):
        """
        Compute root-mean-square misfit overall, per station and per
        component.  Results are stored in rms_array ('rms', 'rms_z',
        'rms_tip') and in self.rms, self.rms_z, self.rms_tip.
        """
        if self.residual_array is None:
            # BUG FIX: previously called the non-existent
            # self._read_residual_fn() and ignored the residual_fn argument
            self.read_residual_file(residual_fn=residual_fn)
        if self.residual_array is None:
            return

        rms_z_comp = np.zeros((len(self.rms_array), 2, 2))
        rms_tip_comp = np.zeros((len(self.rms_array), 2))
        rms_valuelist_all = np.zeros(0)
        rms_valuelist_z = np.zeros(0)
        rms_valuelist_tip = np.zeros(0)

        for stname in self.rms_array['station']:
            rms_valuelist = []
            sta_ind = np.where(self.rms_array['station'] == stname)[0][0]
            sta_indd = np.where(self.residual_array['station'] == stname)[0][0]
            resvals = self.residual_array[sta_indd]
            znorm, tipnorm = None, None
            if np.amax(np.abs(resvals['z'])) > 0:
                # sum over absolute value of z
                # need to divide by sqrt(2) to normalise (code applies same error to real and imag components)
                znorm = np.abs(resvals['z'])/(np.real(resvals['z_err'])*2.**0.5)
                znorm = znorm[np.all(np.isfinite(znorm), axis=(1, 2))]

                # append individual normalised errors to a master list for all stations
                rms_valuelist_all = np.append(rms_valuelist_all, znorm.flatten())
                rms_valuelist_z = np.append(rms_valuelist_z, znorm.flatten())

                # normalised error for separate components
                rms_z_comp[sta_ind] = (((znorm**2.).sum(axis=0))/(znorm.shape[0]))**0.5
                rms_valuelist.append(rms_z_comp[sta_ind])

            if np.amax(np.abs(resvals['tip'])) > 0:
                # sum over absolute value of tipper
                # need to divide by sqrt(2) to normalise (code applies same error to real and imag components)
                tipnorm = np.abs(resvals['tip'])/(np.real(resvals['tip_err'])*2.**0.5)
                tipnorm = tipnorm[np.all(np.isfinite(tipnorm), axis=(1, 2))]

                # append individual normalised errors to a master list for all stations
                rms_valuelist_all = np.append(rms_valuelist_all, tipnorm.flatten())
                rms_valuelist_tip = np.append(rms_valuelist_tip, tipnorm.flatten())

                # normalised error for separate components
                rms_tip_comp[sta_ind] = (((tipnorm**2.).sum(axis=0))/len(tipnorm))**0.5
                rms_valuelist.append(rms_tip_comp[sta_ind])

            rms_valuelist = np.vstack(rms_valuelist).flatten()
            rms_value = ((rms_valuelist**2.).sum()/rms_valuelist.size)**0.5

            self.rms_array[sta_ind]['rms'] = rms_value
            if znorm is not None:
                self.rms_array[sta_ind]['rms_z'] = ((rms_z_comp[sta_ind]**2.).sum()/rms_z_comp[sta_ind].size)**0.5
            if tipnorm is not None:
                # BUG FIX: was normalised by rms_z_comp[sta_ind].size
                # (impedance component count) instead of the tipper count
                self.rms_array[sta_ind]['rms_tip'] = ((rms_tip_comp[sta_ind]**2.).sum()/rms_tip_comp[sta_ind].size)**0.5

        self.rms = np.mean(rms_valuelist_all**2.)**0.5
        self.rms_z = np.mean(rms_valuelist_z**2.)**0.5
        self.rms_tip = np.mean(rms_valuelist_tip**2.)**0.5

    def write_rms_to_file(self):
        """
        write rms station data to file 'rms_values.dat' in workdir
        """
        fn = op.join(self.workdir, 'rms_values.dat')

        if not hasattr(self, 'rms'):
            self.get_rms()

        headerlist = ['station', 'lon', 'lat', 'rel_east', 'rel_north', 'rms', 'rms_z', 'rms_tip']

        dtype = []
        for val in headerlist:
            if val == 'station':
                dtype.append((val, 'S10'))
            else:
                # use builtin float (np.float is a deprecated alias)
                dtype.append((val, float))

        savelist = np.zeros(len(self.rms_array), dtype=dtype)
        for val in headerlist:
            savelist[val] = self.rms_array[val]

        header = ' '.join(headerlist)

        np.savetxt(fn, savelist, header=header,
                   fmt=['%s', '%.6f', '%.6f', '%.1f', '%.1f', '%.3f', '%.3f', '%.3f'])
#==============================================================================
# Control File for inversion
#==============================================================================
class Control_Inv(object):
    """
    Read and write the ModEM inversion control file, which sets how the
    inversion starts and how it is run.
    """

    def __init__(self, **kwargs):
        self.output_fn = kwargs.pop('output_fn', 'MODULAR_NLCG')
        self.lambda_initial = kwargs.pop('lambda_initial', 10)
        self.lambda_step = kwargs.pop('lambda_step', 10)
        self.model_search_step = kwargs.pop('model_search_step', 1)
        self.rms_reset_search = kwargs.pop('rms_reset_search', 2.0e-3)
        self.rms_target = kwargs.pop('rms_target', 1.05)
        self.lambda_exit = kwargs.pop('lambda_exit', 1.0e-4)
        self.max_iterations = kwargs.pop('max_iterations', 100)
        self.save_path = kwargs.pop('save_path', os.getcwd())
        self.fn_basename = kwargs.pop('fn_basename', 'control.inv')
        self.control_fn = kwargs.pop('control_fn', os.path.join(self.save_path,
                                                                self.fn_basename))

        # ModEM key strings, in file order
        self._control_keys = ['Model and data output file name',
                              'Initial damping factor lambda',
                              'To update lambda divide by',
                              'Initial search step in model units',
                              'Restart when rms diff is less than',
                              'Exit search when rms is less than',
                              'Exit when lambda is less than',
                              'Maximum number of iterations']

        self._control_dict = dict(zip(self._control_keys,
                                      [self.output_fn, self.lambda_initial,
                                       self.lambda_step, self.model_search_step,
                                       self.rms_reset_search, self.rms_target,
                                       self.lambda_exit, self.max_iterations]))
        # per-key format specs used when writing the file
        self._string_fmt_dict = dict(zip(self._control_keys,
                                         ['<', '<.1f', '<.1f', '<.1f', '<.1e',
                                          '<.2f', '<.1e', '<.0f']))

    def write_control_file(self, control_fn=None, save_path=None,
                           fn_basename=None):
        """
        write control file

        Arguments:
        ------------
            **control_fn** : string
                             full path to save control file to
                             *default* is save_path/fn_basename

            **save_path** : string
                            directory path to save control file to
                            *default* is cwd

            **fn_basename** : string
                              basename of control file
                              *default* is control.inv
        """
        if control_fn is not None:
            self.save_path = os.path.dirname(control_fn)
            self.fn_basename = os.path.basename(control_fn)
        if save_path is not None:
            self.save_path = save_path
        if fn_basename is not None:
            self.fn_basename = fn_basename
        self.control_fn = os.path.join(self.save_path, self.fn_basename)

        # refresh the dict in case attributes changed since __init__
        self._control_dict = dict(zip(self._control_keys,
                                      [self.output_fn, self.lambda_initial,
                                       self.lambda_step, self.model_search_step,
                                       self.rms_reset_search, self.rms_target,
                                       self.lambda_exit, self.max_iterations]))

        clines = []
        for key in self._control_keys:
            value = self._control_dict[key]
            str_fmt = self._string_fmt_dict[key]
            clines.append('{0:<35}: {1:{2}}\n'.format(key, value, str_fmt))

        # BUG FIX: use open() in a context manager instead of the
        # Python-2-only file() builtin
        with open(self.control_fn, 'w') as cfid:
            cfid.writelines(clines)

        print('Wrote ModEM control file to {0}'.format(self.control_fn))

    def read_control_file(self, control_fn=None):
        """
        read in a control file and set the matching attributes
        """
        if control_fn is not None:
            self.control_fn = control_fn

        if self.control_fn is None:
            raise mtex.MTpyError_file_handling('control_fn is None, input '
                                               'control file')

        if os.path.isfile(self.control_fn) is False:
            raise mtex.MTpyError_file_handling('Could not find {0}'.format(
                self.control_fn))

        self.save_path = os.path.dirname(self.control_fn)
        self.fn_basename = os.path.basename(self.control_fn)

        with open(self.control_fn, 'r') as cfid:
            clines = cfid.readlines()
        for cline in clines:
            clist = cline.strip().split(':')
            if len(clist) == 2:
                # numeric values become floats; anything else stays a string
                try:
                    self._control_dict[clist[0].strip()] = float(clist[1])
                except ValueError:
                    self._control_dict[clist[0].strip()] = clist[1]

        # set attributes
        attr_list = ['output_fn', 'lambda_initial', 'lambda_step',
                     'model_search_step', 'rms_reset_search', 'rms_target',
                     'lambda_exit', 'max_iterations']
        for key, kattr in zip(self._control_keys, attr_list):
            setattr(self, kattr, self._control_dict[key])
#==============================================================================
# Control File for inversion
#==============================================================================
class Control_Fwd(object):
    """
    Read and write the ModEM forward-solver control file, which sets the
    tolerances and iteration limits for the forward/adjoint solvers.
    """

    def __init__(self, **kwargs):
        self.num_qmr_iter = kwargs.pop('num_qmr_iter', 40)
        self.max_num_div_calls = kwargs.pop('max_num_div_calls', 20)
        self.max_num_div_iters = kwargs.pop('max_num_div_iters', 100)
        self.misfit_tol_fwd = kwargs.pop('misfit_tol_fwd', 1.0e-7)
        self.misfit_tol_adj = kwargs.pop('misfit_tol_adj', 1.0e-7)
        self.misfit_tol_div = kwargs.pop('misfit_tol_div', 1.0e-5)

        self.save_path = kwargs.pop('save_path', os.getcwd())
        self.fn_basename = kwargs.pop('fn_basename', 'control.fwd')
        self.control_fn = kwargs.pop('control_fn', os.path.join(self.save_path,
                                                                self.fn_basename))

        # ModEM key strings, in file order
        self._control_keys = ['Number of QMR iters per divergence correction',
                              'Maximum number of divergence correction calls',
                              'Maximum number of divergence correction iters',
                              'Misfit tolerance for EM forward solver',
                              'Misfit tolerance for EM adjoint solver',
                              'Misfit tolerance for divergence correction']

        self._control_dict = dict(zip(self._control_keys,
                                      [self.num_qmr_iter,
                                       self.max_num_div_calls,
                                       self.max_num_div_iters,
                                       self.misfit_tol_fwd,
                                       self.misfit_tol_adj,
                                       self.misfit_tol_div]))
        # per-key format specs used when writing the file
        self._string_fmt_dict = dict(zip(self._control_keys,
                                         ['<.0f', '<.0f', '<.0f', '<.1e', '<.1e',
                                          '<.1e']))

    def write_control_file(self, control_fn=None, save_path=None,
                           fn_basename=None):
        """
        write control file

        Arguments:
        ------------
            **control_fn** : string
                             full path to save control file to
                             *default* is save_path/fn_basename

            **save_path** : string
                            directory path to save control file to
                            *default* is cwd

            **fn_basename** : string
                              basename of control file
                              *default* is control.fwd
        """
        if control_fn is not None:
            self.save_path = os.path.dirname(control_fn)
            self.fn_basename = os.path.basename(control_fn)
        if save_path is not None:
            self.save_path = save_path
        if fn_basename is not None:
            self.fn_basename = fn_basename
        self.control_fn = os.path.join(self.save_path, self.fn_basename)

        # refresh the dict in case attributes changed since __init__
        self._control_dict = dict(zip(self._control_keys,
                                      [self.num_qmr_iter,
                                       self.max_num_div_calls,
                                       self.max_num_div_iters,
                                       self.misfit_tol_fwd,
                                       self.misfit_tol_adj,
                                       self.misfit_tol_div]))

        clines = []
        for key in self._control_keys:
            value = self._control_dict[key]
            str_fmt = self._string_fmt_dict[key]
            clines.append('{0:<47}: {1:{2}}\n'.format(key, value, str_fmt))

        # BUG FIX: use open() in a context manager instead of the
        # Python-2-only file() builtin
        with open(self.control_fn, 'w') as cfid:
            cfid.writelines(clines)

        print('Wrote ModEM control file to {0}'.format(self.control_fn))

    def read_control_file(self, control_fn=None):
        """
        read in a control file and set the matching attributes
        """
        if control_fn is not None:
            self.control_fn = control_fn

        if self.control_fn is None:
            raise mtex.MTpyError_file_handling('control_fn is None, input '
                                               'control file')

        if os.path.isfile(self.control_fn) is False:
            raise mtex.MTpyError_file_handling('Could not find {0}'.format(
                self.control_fn))

        self.save_path = os.path.dirname(self.control_fn)
        self.fn_basename = os.path.basename(self.control_fn)

        with open(self.control_fn, 'r') as cfid:
            clines = cfid.readlines()
        for cline in clines:
            clist = cline.strip().split(':')
            if len(clist) == 2:
                # numeric values become floats; anything else stays a string
                try:
                    self._control_dict[clist[0].strip()] = float(clist[1])
                except ValueError:
                    self._control_dict[clist[0].strip()] = clist[1]

        # set attributes
        attr_list = ['num_qmr_iter', 'max_num_div_calls', 'max_num_div_iters',
                     'misfit_tol_fwd', 'misfit_tol_adj', 'misfit_tol_div']
        for key, kattr in zip(self._control_keys, attr_list):
            setattr(self, kattr, self._control_dict[key])
#==============================================================================
# covariance
#==============================================================================
class Covariance(object):
    """
    Read and write ModEM covariance (.cov) files, which define model
    smoothing and area masks (0 = air, 9 = ocean, 1 = earth).
    """

    def __init__(self, grid_dimensions=None, **kwargs):
        # (Nx, Ny, NzEarth) of the model grid, excluding air layers
        self.grid_dimensions = grid_dimensions
        self.smoothing_east = 0.3
        self.smoothing_north = 0.3
        self.smoothing_z = 0.3
        self.smoothing_num = 1

        self.exception_list = []
        # (Nx, Ny, Nz) integer mask array; None until set or written
        self.mask_arr = None

        self.save_path = os.getcwd()
        self.cov_fn_basename = 'covariance.cov'

        self.cov_fn = None

        self._header_str = '\n'.join(['+{0}+'.format('-'*77),
'| This file defines model covariance for a recursive autoregression scheme. |',
'| The model space may be divided into distinct areas using integer masks. |',
'| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |',
'| air, ocean and the rest of the model is turned off automatically. You can |',
'| also define exceptions to override smoothing between any two model areas. |',
'| To turn off smoothing set it to zero. This header is 16 lines long. |',
'| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |',
'| 2. Smoothing in the X direction (NzEarth real values) |',
'| 3. Smoothing in the Y direction (NzEarth real values) |',
'| 4. Vertical smoothing (1 real value) |',
'| 5. Number of times the smoothing should be applied (1 integer >= 0) |',
'| 6. Number of exceptions (1 integer >= 0) |',
'| 7. Exceptions in the for e.g. 2 3 0. (to turn off smoothing between 3 & 4) |',
'| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|',
'+{0}+'.format('-'*77)])

        for key in kwargs.keys():
            setattr(self, key, kwargs[key])

    def write_covariance_file(self, cov_fn=None, save_path=None,
                              cov_fn_basename=None, model_fn=None,
                              sea_water=0.3, air=1e12):
        """
        Write a covariance file.  If model_fn is given, the grid
        dimensions and mask (air/ocean detection by resistivity value)
        are taken from that model file.
        """
        if model_fn is not None:
            mod_obj = Model()
            mod_obj.read_model_file(model_fn)

            # update save_path from model path if not provided separately
            if save_path is None:
                save_path = os.path.dirname(model_fn)

            print('Reading {0}'.format(model_fn))
            self.grid_dimensions = mod_obj.res_model.shape
            self.mask_arr = np.ones_like(mod_obj.res_model)
            # cells within 10% of the air/sea resistivities get the
            # reserved masks (0 = air, 9 = ocean)
            self.mask_arr[np.where(mod_obj.res_model >= air*.9)] = 0
            self.mask_arr[np.where((mod_obj.res_model <= sea_water*1.1) &
                                   (mod_obj.res_model >= sea_water*.9))] = 9

        if self.grid_dimensions is None:
            raise ModEMError('Grid dimensions are None, input as (Nx, Ny, Nz)')

        if cov_fn is not None:
            self.cov_fn = cov_fn
        else:
            if save_path is not None:
                self.save_path = save_path
            if cov_fn_basename is not None:
                self.cov_fn_basename = cov_fn_basename
            self.cov_fn = os.path.join(self.save_path, self.cov_fn_basename)

        clines = [self._header_str]
        clines.append('\n\n')

        # --> grid dimensions
        clines.append(' {0:<10}{1:<10}{2:<10}\n'.format(self.grid_dimensions[0],
                                                        self.grid_dimensions[1],
                                                        self.grid_dimensions[2]))
        clines.append('\n')

        # --> smoothing in north direction
        n_smooth_line = ''
        for zz in range(self.grid_dimensions[2]):
            n_smooth_line += ' {0:<5.1f}'.format(self.smoothing_north)
        clines.append(n_smooth_line+'\n')

        # --> smoothing in east direction
        e_smooth_line = ''
        for zz in range(self.grid_dimensions[2]):
            e_smooth_line += ' {0:<5.1f}'.format(self.smoothing_east)
        clines.append(e_smooth_line+'\n')

        # --> smoothing in vertical direction
        clines.append(' {0:<5.1f}\n'.format(self.smoothing_z))
        clines.append('\n')

        # --> number of times to apply smoothing
        clines.append(' {0:<2.0f}\n'.format(self.smoothing_num))
        clines.append('\n')

        # --> exceptions
        clines.append(' {0:<.0f}\n'.format(len(self.exception_list)))
        for exc in self.exception_list:
            clines.append('{0:<5.0f}{1:<5.0f}{2:<5.0f}\n'.format(exc[0],
                                                                 exc[1],
                                                                 exc[2]))
        clines.append('\n')
        clines.append('\n')

        # --> mask array
        if self.mask_arr is None:
            self.mask_arr = np.ones((self.grid_dimensions[0],
                                     self.grid_dimensions[1],
                                     self.grid_dimensions[2]))
        # need to flip north and south.
        write_mask_arr = self.mask_arr[::-1, :, :].copy()
        for zz in range(self.mask_arr.shape[2]):
            clines.append(' {0:<8.0f}{0:<8.0f}\n'.format(zz+1))
            for nn in range(self.mask_arr.shape[0]):
                cline = ''
                for ee in range(self.mask_arr.shape[1]):
                    cline += '{0:^3.0f}'.format(write_mask_arr[nn, ee, zz])
                clines.append(cline+'\n')

        # BUG FIX: use open() in a context manager instead of the
        # Python-2-only file() builtin
        with open(self.cov_fn, 'w') as cfid:
            cfid.writelines(clines)

        print('Wrote covariance file to {0}'.format(self.cov_fn))

    def read_cov_file(self, cov_fn):
        """
        Read a covariance file written in the format produced by
        write_covariance_file.

        NOTE(review): the line-classification heuristic below identifies
        smoothing / dimension / index lines by their token count, so it is
        only unambiguous when Nz and Ny are >= 4 — TODO confirm for small
        grids.
        """
        if not os.path.isfile(cov_fn):
            raise ModEMError('{0} not found, check path'.format(cov_fn))

        self.cov_fn = cov_fn
        self.save_path = os.path.dirname(self.cov_fn)
        self.cov_fn_basename = os.path.basename(self.cov_fn)

        with open(cov_fn, 'r') as fid:
            lines = fid.readlines()

        num_find = False
        east_find = False
        north_find = False
        count = 0

        for line in lines:
            # header lines are drawn with '+' and '|' characters
            if line.find('+') >= 0 or line.find('|') >= 0:
                continue
            else:
                line_list = line.strip().split()
                if len(line_list) == 0:
                    continue
                elif len(line_list) == 1 and num_find == False and \
                        line_list[0].find('.') == -1:
                    self.smoothing_num = int(line_list[0])
                    num_find = True
                elif len(line_list) == 1 and num_find == True and \
                        line_list[0].find('.') == -1:
                    self.exceptions_num = int(line_list[0])
                elif len(line_list) == 1 and line_list[0].find('.') >= 0:
                    self.smoothing_z = float(line_list[0])
                elif len(line_list) == 3:
                    nx, ny, nz = [int(ii) for ii in line_list]
                    self.grid_dimensions = (nx, ny, nz)
                    # use builtin int/float dtypes (np.int/np.float are
                    # removed aliases in modern numpy)
                    self.mask_arr = np.ones((nx, ny, nz), dtype=int)
                    self.smoothing_east = np.zeros(ny)
                    self.smoothing_north = np.zeros(nx)
                elif len(line_list) == 2:
                    # starts at 1 but python starts at 0
                    index_00, index_01 = [int(ii)-1 for ii in line_list]
                    count = 0
                elif line_list[0].find('.') >= 0 and north_find == False:
                    self.smoothing_north = np.array(line_list, dtype=float)
                    north_find = True
                elif line_list[0].find('.') >= 0 and north_find == True:
                    self.smoothing_east = np.array(line_list, dtype=float)
                    east_find = True
                elif north_find == True and east_find == True:
                    line_list = np.array(line_list, dtype=int)
                    line_list = line_list.reshape((ny, 1))
                    self.mask_arr[count, :, index_00:index_01+1] = line_list
                    count += 1

    def get_parameters(self):
        """
        Return the smoothing parameters as a {'covariance.<name>': value}
        dictionary, e.g. for logging the inversion configuration.
        """
        parameter_list = ['smoothing_north',
                          'smoothing_east',
                          'smoothing_z',
                          'smoothing_num']

        parameter_dict = {}
        for parameter in parameter_list:
            key = 'covariance.{0}'.format(parameter)
            parameter_dict[key] = getattr(self, parameter)

        return parameter_dict

    def write_cov_vtk_file(self, cov_vtk_fn, model_fn=None, grid_east=None,
                           grid_north=None, grid_z=None):
        """
        Write a vtk file of the covariance mask on the model grid (grids
        either from model_fn or passed in directly, in meters).
        """
        if model_fn is not None:
            m_obj = Model()
            m_obj.read_model_file(model_fn)
            grid_east = m_obj.grid_east
            grid_north = m_obj.grid_north
            grid_z = m_obj.grid_z

        # use cellData, this makes the grid properly as grid is n+1
        gridToVTK(cov_vtk_fn,
                  grid_north/1000.,
                  grid_east/1000.,
                  grid_z/1000.,
                  cellData={'covariance_mask': self.mask_arr})

        print('-'*50)
        print('--> Wrote covariance file to {0}\n'.format(cov_vtk_fn))
        print('='*26)
#==============================================================================
# Write inversion parameters to a config type file
#==============================================================================
class ModEM_Config(object):
    """
    Read and write configuration files recording how each inversion was run.

    All parameters are collected into a single section dictionary
    ``cfg_dict['ModEM_Inversion_Parameters']`` and written out with
    mtpy's configfile writer.  Any keyword arguments given on
    instantiation are set as attributes.
    """
    def __init__(self, **kwargs):
        # single section holding all inversion parameters
        self.cfg_dict = {'ModEM_Inversion_Parameters': {}}
        for key in kwargs.keys():
            setattr(self, key, kwargs[key])
    def write_config_file(self, save_dir=None,
                          config_fn_basename='ModEM_inv.cfg'):
        """
        Write the accumulated parameter dictionary to a config file.

        Arguments:
        -----------
            save_dir : directory to write to, defaults to the cwd
            config_fn_basename : file name of the config file
        """
        if save_dir is None:
            save_dir = os.getcwd()
        cfg_fn = os.path.join(save_dir, config_fn_basename)
        if self.cfg_dict is not None:
            mtcfg.write_dict_to_configfile(self.cfg_dict,
                                           cfg_fn)
    def add_dict(self, fn=None, obj=None):
        """
        Add parameters from a ModEM file (.rho, .dat or .cov) or from an
        object that provides a ``get_parameters()`` method.

        Raises:
        --------
            ModEMError if neither fn nor obj is given, or if fn has an
            unrecognized extension.
        """
        if fn is not None:
            if fn.endswith('.rho'):
                m_obj = Model()
                m_obj.read_model_file(fn)
            elif fn.endswith('.dat'):
                m_obj = Data()
                m_obj.read_data_file(fn)
            elif fn.endswith('.cov'):
                m_obj = Covariance()
                # bug fix: Covariance's reader is read_cov_file, the
                # previous read_cov_fn call raised AttributeError
                m_obj.read_cov_file(fn)
            else:
                # previously fell through with m_obj unbound --> NameError
                raise ModEMError('Unrecognized file type: {0}'.format(fn))
        elif obj is not None:
            m_obj = obj
        else:
            raise ModEMError('Need to input a file name or object')
        add_dict = m_obj.get_parameters()
        for key in add_dict.keys():
            self.cfg_dict['ModEM_Inversion_Parameters'][key] = add_dict[key]
#==============================================================================
# Manipulate the model to test structures or create a starting model
#==============================================================================
class ModelManipulator(Model):
    """
    will plot a model from wsinv3d or init file so the user can manipulate the
    resistivity values relatively easily.  At the moment only plotted
    in map view.
    :Example: ::
        >>> import mtpy.modeling.ws3dinv as ws
        >>> initial_fn = r"/home/MT/ws3dinv/Inv1/WSInitialFile"
        >>> mm = ws.WSModelManipulator(initial_fn=initial_fn)
    =================== =======================================================
    Buttons              Description
    =================== =======================================================
    '='                 increase depth to next vertical node (deeper)
    '-'                 decrease depth to next vertical node (shallower)
    'q'                 quit the plot, rewrites initial file when pressed
    'a'                 copies the above horizontal layer to the present layer
    'b'                 copies the below horizonal layer to present layer
    'u'                 undo previous change
    =================== =======================================================
    =================== =======================================================
    Attributes          Description
    =================== =======================================================
    ax1                 matplotlib.axes instance for mesh plot of the model
    ax2                 matplotlib.axes instance of colorbar
    cb                  matplotlib.colorbar instance for colorbar
    cid_depth           matplotlib.canvas.connect for depth
    cmap                matplotlib.colormap instance
    cmax                maximum value of resistivity for colorbar. (linear)
    cmin                minimum value of resistivity for colorbar (linear)
    data_fn             full path fo data file
    depth_index         integer value of depth slice for plotting
    dpi                 resolution of figure in dots-per-inch
    dscale              depth scaling, computed internally
    east_line_xlist     list of east mesh lines for faster plotting
    east_line_ylist     list of east mesh lines for faster plotting
    fdict               dictionary of font properties
    fig                 matplotlib.figure instance
    fig_num             number of figure instance
    fig_size            size of figure in inches
    font_size           size of font in points
    grid_east           location of east nodes in relative coordinates
    grid_north          location of north nodes in relative coordinates
    grid_z              location of vertical nodes in relative coordinates
    initial_fn          full path to initial file
    m_height            mean height of horizontal cells
    m_width             mean width of horizontal cells
    map_scale           [ 'm' | 'km' ] scale of map
    mesh_east           np.meshgrid of east, north
    mesh_north          np.meshgrid of east, north
    mesh_plot           matplotlib.axes.pcolormesh instance
    model_fn            full path to model file
    new_initial_fn      full path to new initial file
    nodes_east          spacing between east nodes
    nodes_north         spacing between north nodes
    nodes_z             spacing between vertical nodes
    north_line_xlist    list of coordinates of north nodes for faster plotting
    north_line_ylist    list of coordinates of north nodes for faster plotting
    plot_yn             [ 'y' | 'n' ] plot on instantiation
    radio_res           matplotlib.widget.radio instance for change resistivity
    rect_selector       matplotlib.widget.rect_selector
    res                 np.ndarray(nx, ny, nz) for model in linear resistivity
    res_copy            copy of res for undo
    res_dict            dictionary of segmented resistivity values
    res_list            list of resistivity values for model linear scale
    res_model           np.ndarray(nx, ny, nz) of resistivity values from
                        res_list (linear scale)
    res_model_int       np.ndarray(nx, ny, nz) of integer values corresponding
                        to res_list for initial model
    res_value           current resistivty value of radio_res
    save_path           path to save initial file to
    station_east        station locations in east direction
    station_north       station locations in north direction
    xlimits             limits of plot in e-w direction
    ylimits             limits of plot in n-s direction
    =================== =======================================================
    """
    def __init__(self, model_fn=None, data_fn=None, **kwargs):
        """
        Initialize from a model file and optionally a data file; keyword
        arguments override the plotting defaults set below.
        """
        #be sure to initialize Model
        Model.__init__(self, model_fn=model_fn, **kwargs)
        self.data_fn = data_fn
        self.model_fn_basename = kwargs.pop('model_fn_basename',
                                            'ModEM_Model_rw.ws')
        # save next to the model file, else next to the data file, else cwd
        if self.model_fn is not None:
            self.save_path = os.path.dirname(self.model_fn)
        elif self.data_fn is not None:
            self.save_path = os.path.dirname(self.data_fn)
        else:
            self.save_path = os.getcwd()
        #station locations in relative coordinates read from data file
        self.station_east = None
        self.station_north = None
        #--> set map scale
        self.map_scale = kwargs.pop('map_scale', 'km')
        self.m_width = 100
        self.m_height = 100
        #--> scale the map coordinates
        # NOTE(review): an unrecognized map_scale leaves dscale unset,
        # causing an AttributeError later -- only 'km' and 'm' are valid
        if self.map_scale=='km':
            self.dscale = 1000.
        if self.map_scale=='m':
            self.dscale = 1.
        #figure attributes
        self.fig = None
        self.ax1 = None
        self.ax2 = None
        self.cb = None
        self.east_line_xlist = None
        self.east_line_ylist = None
        self.north_line_xlist = None
        self.north_line_ylist = None
        #make a default resistivity list to change values
        self._res_sea = 0.3
        self._res_air = 1E12
        self.res_dict = None
        self.res_list = kwargs.pop('res_list', None)
        # NOTE(review): if res_list is supplied via kwargs, set_res_list is
        # not called and res_dict stays None -- presumably callers then call
        # set_res_list themselves; verify against usage
        if self.res_list is None:
            self.set_res_list(np.array([self._res_sea, 1, 10, 50, 100, 500,
                                        1000, 5000],
                                       dtype=np.float))
        #set initial resistivity value
        self.res_value = self.res_list[0]
        self.cov_arr = None
        #--> set map limits
        self.xlimits = kwargs.pop('xlimits', None)
        self.ylimits = kwargs.pop('ylimits', None)
        self.font_size = kwargs.pop('font_size', 7)
        self.fig_dpi = kwargs.pop('fig_dpi', 300)
        self.fig_num = kwargs.pop('fig_num', 1)
        self.fig_size = kwargs.pop('fig_size', [6, 6])
        self.cmap = kwargs.pop('cmap', cm.jet_r)
        self.depth_index = kwargs.pop('depth_index', 0)
        self.fdict = {'size':self.font_size+2, 'weight':'bold'}
        self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
        self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
        self.subplot_right = kwargs.pop('subplot_right', .8)
        self.subplot_left = kwargs.pop('subplot_left', .01)
        self.subplot_top = kwargs.pop('subplot_top', .93)
        self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
        #plot on initialization
        self.plot_yn = kwargs.pop('plot_yn', 'y')
        if self.plot_yn=='y':
            self.get_model()
            self.plot()
    def set_res_list(self, res_list):
        """
        on setting res_list also set the res_dict to correspond
        """
        self.res_list = res_list
        #make a dictionary of values to write to file.
        # keys are resistivities, values are 1-based integer codes
        self.res_dict = dict([(res, ii)
                              for ii, res in enumerate(self.res_list,1)])
        # replot with the new resistivity segmentation if already plotted
        if self.fig is not None:
            plt.close()
            self.plot()
    #---read files-------------------------------------------------------------
    def get_model(self):
        """
        reads in initial file or model file and set attributes:
            -resmodel
            -northrid
            -eastrid
            -zgrid
            -res_list if initial file
        """
        #--> read in model file
        self.read_model_file()
        self.cov_arr = np.ones_like(self.res_model)
        #--> read in data file if given
        if self.data_fn is not None:
            md_data = Data()
            md_data.read_data_file(self.data_fn)
            #get station locations
            self.station_east = md_data.station_locations.rel_east
            self.station_north = md_data.station_locations.rel_north
        #get cell block sizes
        # median of the interior nodes, excluding 5 padding cells each side
        self.m_height = np.median(self.nodes_north[5:-5])/self.dscale
        self.m_width = np.median(self.nodes_east[5:-5])/self.dscale
        #make a copy of original in case there are unwanted changes
        self.res_copy = self.res_model.copy()
    #---plot model-------------------------------------------------------------
    def plot(self):
        """
        plots the model with:
            -a radio dial for depth slice
            -radio dial for resistivity value
        """
        # set plot properties
        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top
        font_dict = {'size':self.font_size+2, 'weight':'bold'}
        #make sure there is a model to plot
        if self.res_model is None:
            self.get_model()
        # colorbar limits in log10(resistivity)
        self.cmin = np.floor(np.log10(min(self.res_list)))
        self.cmax = np.ceil(np.log10(max(self.res_list)))
        #-->Plot properties
        plt.rcParams['font.size'] = self.font_size
        #need to add an extra row and column to east and north to make sure
        #all is plotted see pcolor for details.
        plot_east = self.grid_east/self.dscale
        plot_north = self.grid_north/self.dscale
        #make a mesh grid for plotting
        #the 'ij' makes sure the resulting grid is in east, north
        self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
                                                      plot_north,
                                                      indexing='ij')
        self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)
        plt.clf()
        self.ax1 = self.fig.add_subplot(1, 1, 1, aspect='equal')
        #transpose to make x--east and y--north
        plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
        self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
                                             self.mesh_north,
                                             plot_res,
                                             cmap=self.cmap,
                                             vmin=self.cmin,
                                             vmax=self.cmax)
        #on plus or minus change depth slice
        self.cid_depth = \
                    self.mesh_plot.figure.canvas.mpl_connect('key_press_event',
                                                        self._on_key_callback)
        #plot the stations
        if self.station_east is not None:
            for ee, nn in zip(self.station_east, self.station_north):
                self.ax1.text(ee/self.dscale, nn/self.dscale,
                              '*',
                              verticalalignment='center',
                              horizontalalignment='center',
                              fontdict={'size':self.font_size-2,
                                        'weight':'bold'})
        #set axis properties
        if self.xlimits is not None:
            self.ax1.set_xlim(self.xlimits)
        else:
            self.ax1.set_xlim(xmin=self.grid_east.min()/self.dscale,
                              xmax=self.grid_east.max()/self.dscale)
        if self.ylimits is not None:
            self.ax1.set_ylim(self.ylimits)
        else:
            self.ax1.set_ylim(ymin=self.grid_north.min()/self.dscale,
                              ymax=self.grid_north.max()/self.dscale)
        #self.ax1.xaxis.set_minor_locator(MultipleLocator(100*1./dscale))
        #self.ax1.yaxis.set_minor_locator(MultipleLocator(100*1./dscale))
        self.ax1.set_ylabel('Northing ('+self.map_scale+')',
                            fontdict=self.fdict)
        self.ax1.set_xlabel('Easting ('+self.map_scale+')',
                            fontdict=self.fdict)
        depth_title = self.grid_z[self.depth_index]/self.dscale
        self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
                           '('+self.map_scale+')',
                           fontdict=self.fdict)
        #plot the grid if desired
        # mesh lines are drawn as a single Line2D with None separators for
        # speed instead of one line object per grid line
        self.east_line_xlist = []
        self.east_line_ylist = []
        for xx in self.grid_east:
            self.east_line_xlist.extend([xx/self.dscale, xx/self.dscale])
            self.east_line_xlist.append(None)
            self.east_line_ylist.extend([self.grid_north.min()/self.dscale,
                                         self.grid_north.max()/self.dscale])
            self.east_line_ylist.append(None)
        self.ax1.plot(self.east_line_xlist,
                      self.east_line_ylist,
                      lw=.25,
                      color='k')
        self.north_line_xlist = []
        self.north_line_ylist = []
        for yy in self.grid_north:
            self.north_line_xlist.extend([self.grid_east.min()/self.dscale,
                                          self.grid_east.max()/self.dscale])
            self.north_line_xlist.append(None)
            self.north_line_ylist.extend([yy/self.dscale, yy/self.dscale])
            self.north_line_ylist.append(None)
        self.ax1.plot(self.north_line_xlist,
                      self.north_line_ylist,
                      lw=.25,
                      color='k')
        #plot the colorbar
#        self.ax2 = mcb.make_axes(self.ax1, orientation='vertical', shrink=.35)
        self.ax2 = self.fig.add_axes([.81, .45, .16, .03])
        self.ax2.xaxis.set_ticks_position('top')
        #seg_cmap = ws.cmap_discretize(self.cmap, len(self.res_list))
        self.cb = mcb.ColorbarBase(self.ax2,cmap=self.cmap,
                                   norm=colors.Normalize(vmin=self.cmin,
                                                         vmax=self.cmax),
                                   orientation='horizontal')
        self.cb.set_label('Resistivity ($\Omega \cdot$m)',
                          fontdict={'size':self.font_size})
        self.cb.set_ticks(np.arange(self.cmin, self.cmax+1))
        self.cb.set_ticklabels([mtplottools.labeldict[cc]
                                for cc in np.arange(self.cmin, self.cmax+1)])
        #make a resistivity radio button
        #resrb = self.fig.add_axes([.85,.1,.1,.2])
        #reslabels = ['{0:.4g}'.format(res) for res in self.res_list]
        #self.radio_res = widgets.RadioButtons(resrb, reslabels,
        #                                active=self.res_dict[self.res_value])
#        slider_ax_bounds = list(self.cb.ax.get_position().bounds)
#        slider_ax_bounds[0] += .1
        slider_ax = self.fig.add_axes([.81, .5, .16, .03])
        # slider value is in log10 resistivity, see set_res_value
        self.slider_res = widgets.Slider(slider_ax, 'Resistivity',
                                         self.cmin, self.cmax,
                                         valinit=2)
        #make a rectangular selector
        self.rect_selector = widgets.RectangleSelector(self.ax1,
                                                       self.rect_onselect,
                                                       drawtype='box',
                                                       useblit=True)
        plt.show()
        #needs to go after show()
        self.slider_res.on_changed(self.set_res_value)
        #self.radio_res.on_clicked(self.set_res_value)
    def redraw_plot(self):
        """
        redraws the plot
        """
        current_xlimits = self.ax1.get_xlim()
        current_ylimits = self.ax1.get_ylim()
        self.ax1.cla()
        # transpose so x is east, y is north (as in plot())
        plot_res = np.log10(self.res_model[:,:,self.depth_index].T)
        self.mesh_plot = self.ax1.pcolormesh(self.mesh_east,
                                             self.mesh_north,
                                             plot_res,
                                             cmap=self.cmap,
                                             vmin=self.cmin,
                                             vmax=self.cmax)
        #plot the stations
        if self.station_east is not None:
            for ee,nn in zip(self.station_east, self.station_north):
                self.ax1.text(ee/self.dscale, nn/self.dscale,
                              '*',
                              verticalalignment='center',
                              horizontalalignment='center',
                              fontdict={'size':self.font_size-2,
                                        'weight':'bold'})
        #set axis properties
        if self.xlimits is not None:
            self.ax1.set_xlim(self.xlimits)
        else:
            self.ax1.set_xlim(current_xlimits)
        if self.ylimits is not None:
            self.ax1.set_ylim(self.ylimits)
        else:
            self.ax1.set_ylim(current_ylimits)
        self.ax1.set_ylabel('Northing ('+self.map_scale+')',
                            fontdict=self.fdict)
        self.ax1.set_xlabel('Easting ('+self.map_scale+')',
                            fontdict=self.fdict)
        depth_title = self.grid_z[self.depth_index]/self.dscale
        self.ax1.set_title('Depth = {:.3f} '.format(depth_title)+\
                           '('+self.map_scale+')',
                           fontdict=self.fdict)
        #plot finite element mesh
        # reuse the line lists built in plot()
        self.ax1.plot(self.east_line_xlist,
                      self.east_line_ylist,
                      lw=.25,
                      color='k')
        self.ax1.plot(self.north_line_xlist,
                      self.north_line_ylist,
                      lw=.25,
                      color='k')
        #be sure to redraw the canvas
        self.fig.canvas.draw()
#    def set_res_value(self, label):
#        self.res_value = float(label)
#        print 'set resistivity to ', label
#        print self.res_value
    def set_res_value(self, val):
        # slider callback: val is log10(resistivity)
        self.res_value = 10**val
        print 'set resistivity to ', self.res_value
    def _on_key_callback(self,event):
        """
        on pressing a key do something
        """
        self.event_change_depth = event
        #go down a layer on push of +/= keys
        if self.event_change_depth.key == '=':
            self.depth_index += 1
            # clamp at the deepest layer
            if self.depth_index>len(self.grid_z)-1:
                self.depth_index = len(self.grid_z)-1
                print 'already at deepest depth'
            print 'Plotting Depth {0:.3f}'.format(self.grid_z[self.depth_index]/\
                    self.dscale)+'('+self.map_scale+')'
            self.redraw_plot()
        #go up a layer on push of - key
        elif self.event_change_depth.key == '-':
            self.depth_index -= 1
            # clamp at the surface layer
            if self.depth_index < 0:
                self.depth_index = 0
            print 'Plotting Depth {0:.3f} '.format(self.grid_z[self.depth_index]/\
                    self.dscale)+'('+self.map_scale+')'
            self.redraw_plot()
        #exit plot on press of q
        elif self.event_change_depth.key == 'q':
            self.event_change_depth.canvas.mpl_disconnect(self.cid_depth)
            plt.close(self.event_change_depth.canvas.figure)
            self.rewrite_model_file()
        #copy the layer above
        elif self.event_change_depth.key == 'a':
            try:
                if self.depth_index == 0:
                    print 'No layers above'
                else:
                    self.res_model[:, :, self.depth_index] = \
                                       self.res_model[:, :, self.depth_index-1]
            except IndexError:
                print 'No layers above'
            self.redraw_plot()
        #copy the layer below
        elif self.event_change_depth.key == 'b':
            try:
                self.res_model[:, :, self.depth_index] = \
                                       self.res_model[:, :, self.depth_index+1]
            except IndexError:
                print 'No more layers below'
            self.redraw_plot()
        #undo
        # NOTE(review): 'u' assumes a rectangle has been selected before --
        # self.xchange/self.ychange are only set by rect_onselect
        elif self.event_change_depth.key == 'u':
            if type(self.xchange) is int and type(self.ychange) is int:
                self.res_model[self.ychange, self.xchange, self.depth_index] =\
                self.res_copy[self.ychange, self.xchange, self.depth_index]
            else:
                for xx in self.xchange:
                    for yy in self.ychange:
                        self.res_model[yy, xx, self.depth_index] = \
                        self.res_copy[yy, xx, self.depth_index]
            self.redraw_plot()
    def change_model_res(self, xchange, ychange):
        """
        change resistivity values of resistivity model
        """
        # single cell or a block of cells, then refresh the view
        if type(xchange) is int and type(ychange) is int:
            self.res_model[ychange, xchange, self.depth_index] = self.res_value
        else:
            for xx in xchange:
                for yy in ychange:
                    self.res_model[yy, xx, self.depth_index] = self.res_value
        self.redraw_plot()
    def rect_onselect(self, eclick, erelease):
        """
        on selecting a rectangle change the colors to the resistivity values
        """
        x1, y1 = eclick.xdata, eclick.ydata
        x2, y2 = erelease.xdata, erelease.ydata
        # map data coordinates to grid indices
        self.xchange = self._get_east_index(x1, x2)
        self.ychange = self._get_north_index(y1, y2)
        #reset values of resistivity
        self.change_model_res(self.xchange, self.ychange)
    def _get_east_index(self, x1, x2):
        """
        get the index value of the points to be changed
        """
        # handle drags in either direction; x1, x2 are in map units
        if x1 < x2:
            xchange = np.where((self.grid_east/self.dscale >= x1) & \
                               (self.grid_east/self.dscale <= x2))[0]
            if len(xchange) == 0:
                xchange = np.where(self.grid_east/self.dscale >= x1)[0][0]-1
                return [xchange]
        if x1 > x2:
            xchange = np.where((self.grid_east/self.dscale <= x1) & \
                               (self.grid_east/self.dscale >= x2))[0]
            if len(xchange) == 0:
                xchange = np.where(self.grid_east/self.dscale >= x2)[0][0]-1
                return [xchange]
        #check the edges to see if the selection should include the square
        xchange = np.append(xchange, xchange[0]-1)
        xchange.sort()
        return xchange
    def _get_north_index(self, y1, y2):
        """
        get the index value of the points to be changed in north direction
        need to flip the index because the plot is flipped
        """
        if y1 < y2:
            ychange = np.where((self.grid_north/self.dscale > y1) & \
                               (self.grid_north/self.dscale < y2))[0]
            if len(ychange) == 0:
                ychange = np.where(self.grid_north/self.dscale >= y1)[0][0]-1
                return [ychange]
        elif y1 > y2:
            ychange = np.where((self.grid_north/self.dscale < y1) & \
                               (self.grid_north/self.dscale > y2))[0]
            if len(ychange) == 0:
                ychange = np.where(self.grid_north/self.dscale >= y2)[0][0]-1
                return [ychange]
        # shift and extend by one cell to include the boundary square
        ychange -= 1
        ychange = np.append(ychange, ychange[-1]+1)
        return ychange
    def rewrite_model_file(self, model_fn=None, save_path=None,
                           model_fn_basename=None):
        """
        write an initial file for wsinv3d from the model created.
        """
        if save_path is not None:
            self.save_path = save_path
        self.model_fn = model_fn
        if model_fn_basename is not None:
            self.model_fn_basename = model_fn_basename
        self.write_model_file()
#==============================================================================
# plot response
#==============================================================================
class PlotResponse(object):
"""
plot data and response
Plots the real and imaginary impedance and induction vector if present.
:Example: ::
>>> import mtpy.modeling.new_modem as modem
>>> dfn = r"/home/MT/ModEM/Inv1/DataFile.dat"
>>> rfn = r"/home/MT/ModEM/Inv1/Test_resp_000.dat"
>>> mrp = modem.PlotResponse(data_fn=dfn, resp_fn=rfn)
>>> # plot only the TE and TM modes
>>> mrp.plot_component = 2
>>> mrp.redraw_plot()
======================== ==================================================
Attributes Description
======================== ==================================================
color_mode [ 'color' | 'bw' ] color or black and white plots
cted color for data TE mode
ctem color for data TM mode
ctmd color for model TE mode
ctmm color for model TM mode
data_fn full path to data file
data_object WSResponse instance
e_capsize cap size of error bars in points (*default* is .5)
e_capthick cap thickness of error bars in points (*default*
is 1)
fig_dpi resolution of figure in dots-per-inch (300)
fig_list list of matplotlib.figure instances for plots
fig_size size of figure in inches (*default* is [6, 6])
font_size size of font for tick labels, axes labels are
font_size+2 (*default* is 7)
legend_border_axes_pad padding between legend box and axes
legend_border_pad padding between border of legend and symbols
legend_handle_text_pad padding between text labels and symbols of legend
legend_label_spacing padding between labels
legend_loc location of legend
legend_marker_scale scale of symbols in legend
lw line width response curves (*default* is .5)
ms size of markers (*default* is 1.5)
mted marker for data TE mode
mtem marker for data TM mode
mtmd marker for model TE mode
mtmm marker for model TM mode
phase_limits limits of phase
plot_component [ 2 | 4 ] 2 for TE and TM or 4 for all components
plot_style [ 1 | 2 ] 1 to plot each mode in a seperate
subplot and 2 to plot xx, xy and yx, yy in same
plots
plot_type [ '1' | list of station name ] '1' to plot all
stations in data file or input a list of station
names to plot if station_fn is input, otherwise
input a list of integers associated with the
index with in the data file, ie 2 for 2nd station
plot_z [ True | False ] *default* is True to plot
impedance, False for plotting resistivity and
phase
plot_yn [ 'n' | 'y' ] to plot on instantiation
res_limits limits of resistivity in linear scale
resp_fn full path to response file
resp_object WSResponse object for resp_fn, or list of
WSResponse objects if resp_fn is a list of
response files
station_fn full path to station file written by WSStation
subplot_bottom space between axes and bottom of figure
subplot_hspace space between subplots in vertical direction
subplot_left space between axes and left of figure
subplot_right space between axes and right of figure
subplot_top space between axes and top of figure
subplot_wspace space between subplots in horizontal direction
======================== ==================================================
"""
def __init__(self, data_fn=None, resp_fn=None, **kwargs):
self.data_fn = data_fn
self.resp_fn = resp_fn
self.data_object = None
self.resp_object = []
self.color_mode = kwargs.pop('color_mode', 'color')
self.ms = kwargs.pop('ms', 1.5)
self.ms_r = kwargs.pop('ms_r', 3)
self.lw = kwargs.pop('lw', .5)
self.lw_r = kwargs.pop('lw_r', 1.0)
self.e_capthick = kwargs.pop('e_capthick', .5)
self.e_capsize = kwargs.pop('e_capsize', 2)
#color mode
if self.color_mode == 'color':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 1))
self.ctmd = kwargs.pop('ctmd', (1, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0, .6, .3))
self.ctmm = kwargs.pop('ctmm', (.9, 0, .8))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', '+')
#black and white mode
elif self.color_mode == 'bw':
#color for data
self.cted = kwargs.pop('cted', (0, 0, 0))
self.ctmd = kwargs.pop('ctmd', (0, 0, 0))
self.mted = kwargs.pop('mted', 's')
self.mtmd = kwargs.pop('mtmd', 'o')
#color for occam2d model
self.ctem = kwargs.pop('ctem', (0.6, 0.6, 0.6))
self.ctmm = kwargs.pop('ctmm', (0.6, 0.6, 0.6))
self.mtem = kwargs.pop('mtem', '+')
self.mtmm = kwargs.pop('mtmm', 'x')
self.phase_limits_d = kwargs.pop('phase_limits_d', None)
self.phase_limits_od = kwargs.pop('phase_limits_od', None)
self.res_limits_d = kwargs.pop('res_limits_d', None)
self.res_limits_od = kwargs.pop('res_limits_od', None)
self.tipper_limits = kwargs.pop('tipper_limits', None)
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.subplot_wspace = kwargs.pop('subplot_wspace', .3)
self.subplot_hspace = kwargs.pop('subplot_hspace', .0)
self.subplot_right = kwargs.pop('subplot_right', .98)
self.subplot_left = kwargs.pop('subplot_left', .08)
self.subplot_top = kwargs.pop('subplot_top', .85)
self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
self.legend_loc = 'upper center'
self.legend_pos = (.5, 1.18)
self.legend_marker_scale = 1
self.legend_border_axes_pad = .01
self.legend_label_spacing = 0.07
self.legend_handle_text_pad = .2
self.legend_border_pad = .15
self.font_size = kwargs.pop('font_size', 6)
self.plot_type = kwargs.pop('plot_type', '1')
self.plot_style = kwargs.pop('plot_style', 1)
self.plot_component = kwargs.pop('plot_component', 4)
self.plot_yn = kwargs.pop('plot_yn', 'y')
self.plot_z = kwargs.pop('plot_z', True)
self.ylabel_pad = kwargs.pop('ylabel_pad', 1.25)
self.fig_list = []
if self.plot_yn == 'y':
self.plot()
def plot(self):
"""
plot
"""
self.data_object = Data()
self.data_object.read_data_file(self.data_fn)
#get shape of impedance tensors
ns = len(self.data_object.mt_dict.keys())
#read in response files
if self.resp_fn != None:
self.resp_object = []
if type(self.resp_fn) is not list:
resp_obj = Data()
resp_obj.read_data_file(self.resp_fn)
self.resp_object = [resp_obj]
else:
for rfile in self.resp_fn:
resp_obj = Data()
resp_obj.read_data_file(rfile)
self.resp_object.append(resp_obj)
#get number of response files
nr = len(self.resp_object)
if type(self.plot_type) is list:
ns = len(self.plot_type)
#--> set default font size
plt.rcParams['font.size'] = self.font_size
fontdict = {'size':self.font_size+2, 'weight':'bold'}
if self.plot_z == True:
h_ratio = [1, 1, .5]
elif self.plot_z == False:
h_ratio = [1.5, 1, .5]
ax_list = []
line_list = []
label_list = []
#--> make key word dictionaries for plotting
kw_xx = {'color':self.cted,
'marker':self.mted,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':self.ctmd,
'marker':self.mtmd,
'ms':self.ms,
'ls':':',
'lw':self.lw,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
if self.plot_type != '1':
pstation_list = []
if type(self.plot_type) is not list:
self.plot_type = [self.plot_type]
for ii, station in enumerate(self.data_object.mt_dict.keys()):
if type(station) is not int:
for pstation in self.plot_type:
if station.find(str(pstation)) >= 0:
pstation_list.append(station)
else:
for pstation in self.plot_type:
if station == int(pstation):
pstation_list.append(ii)
else:
pstation_list = self.data_object.mt_dict.keys()
for jj, station in enumerate(pstation_list):
z_obj = self.data_object.mt_dict[station].Z
t_obj = self.data_object.mt_dict[station].Tipper
period = self.data_object.period_list
print 'Plotting: {0}'.format(station)
#convert to apparent resistivity and phase
z_obj._compute_res_phase()
#find locations where points have been masked
nzxx = np.nonzero(z_obj.z[:, 0, 0])[0]
nzxy = np.nonzero(z_obj.z[:, 0, 1])[0]
nzyx = np.nonzero(z_obj.z[:, 1, 0])[0]
nzyy = np.nonzero(z_obj.z[:, 1, 1])[0]
ntx = np.nonzero(t_obj.tipper[:, 0, 0])[0]
nty = np.nonzero(t_obj.tipper[:, 0, 1])[0]
#convert to apparent resistivity and phase
if self.plot_z == True:
scaling = np.zeros_like(z_obj.z)
for ii in range(2):
for jj in range(2):
scaling[:, ii, jj] = 1./np.sqrt(z_obj.freq)
plot_res = abs(z_obj.z.real*scaling)
plot_res_err = abs(z_obj.z_err*scaling)
plot_phase = abs(z_obj.z.imag*scaling)
plot_phase_err = abs(z_obj.z_err*scaling)
h_ratio = [1, 1, .5]
elif self.plot_z == False:
plot_res = z_obj.resistivity
plot_res_err = z_obj.resistivity_err
plot_phase = z_obj.phase
plot_phase_err = z_obj.phase_err
h_ratio = [1.5, 1, .5]
try:
self.res_limits_d = (10**(np.floor(np.log10(min([plot_res[nzxx, 0, 0].min(),
plot_res[nzyy, 1, 1].min()])))),
10**(np.ceil(np.log10(max([plot_res[nzxx, 0, 0].max(),
plot_res[nzyy, 1, 1].max()])))))
except ValueError:
self.res_limits_d = None
try:
self.res_limits_od = (10**(np.floor(np.log10(min([plot_res[nzxy, 0, 1].min(),
plot_res[nzyx, 1, 0].min()])))),
10**(np.ceil(np.log10(max([plot_res[nzxy, 0, 1].max(),
plot_res[nzyx, 1, 0].max()])))))
except ValueError:
self.res_limits_od = None
#make figure
fig = plt.figure(station, self.fig_size, dpi=self.fig_dpi)
plt.clf()
fig.suptitle(str(station), fontdict=fontdict)
#set the grid of subplots
if np.all(t_obj.tipper == 0.0) == True:
self.plot_tipper = False
else:
self.plot_tipper = True
self.tipper_limits = (np.round(min([t_obj.tipper[ntx, 0, 0].real.min(),
t_obj.tipper[nty, 0, 1].real.min(),
t_obj.tipper[ntx, 0, 0].imag.min(),
t_obj.tipper[nty, 0, 1].imag.min()]),
1),
np.round(max([t_obj.tipper[ntx, 0, 0].real.max(),
t_obj.tipper[nty, 0, 1].real.max(),
t_obj.tipper[ntx, 0, 0].imag.max(),
t_obj.tipper[nty, 0, 1].imag.max()]),
1))
gs = gridspec.GridSpec(3, 4,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace,
height_ratios=h_ratio)
axrxx = fig.add_subplot(gs[0, 0])
axrxy = fig.add_subplot(gs[0, 1], sharex=axrxx)
axryx = fig.add_subplot(gs[0, 2], sharex=axrxx, sharey=axrxy)
axryy = fig.add_subplot(gs[0, 3], sharex=axrxx, sharey=axrxx)
axpxx = fig.add_subplot(gs[1, 0])
axpxy = fig.add_subplot(gs[1, 1], sharex=axrxx)
axpyx = fig.add_subplot(gs[1, 2], sharex=axrxx)
axpyy = fig.add_subplot(gs[1, 3], sharex=axrxx)
axtxr = fig.add_subplot(gs[2, 0], sharex=axrxx)
axtxi = fig.add_subplot(gs[2, 1], sharex=axrxx, sharey=axtxr)
axtyr = fig.add_subplot(gs[2, 2], sharex=axrxx)
axtyi = fig.add_subplot(gs[2, 3], sharex=axrxx, sharey=axtyr)
self.ax_list = [axrxx, axrxy, axryx, axryy,
axpxx, axpxy, axpyx, axpyy,
axtxr, axtxi, axtyr, axtyi]
#---------plot the apparent resistivity-----------------------------------
#plot each component in its own subplot
# plot data response
erxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
plot_res[nzxx, 0, 0],
plot_res_err[nzxx, 0, 0],
**kw_xx)
erxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
plot_res[nzxy, 0, 1],
plot_res_err[nzxy, 0, 1],
**kw_xx)
eryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
plot_res[nzyx, 1, 0],
plot_res_err[nzyx, 1, 0],
**kw_yy)
eryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
plot_res[nzyy, 1, 1],
plot_res_err[nzyy, 1, 1],
**kw_yy)
#plot phase
epxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
plot_phase[nzxx, 0, 0],
plot_phase_err[nzxx, 0, 0],
**kw_xx)
epxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
plot_phase[nzxy, 0, 1],
plot_phase_err[nzxy, 0, 1],
**kw_xx)
epyx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
plot_phase[nzyx, 1, 0],
plot_phase_err[nzyx, 1, 0],
**kw_yy)
epyy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
plot_phase[nzyy, 1, 1],
plot_phase_err[nzyy, 1, 1],
**kw_yy)
#plot tipper
if self.plot_tipper == True:
ertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
t_obj.tipper[ntx, 0, 0].real,
t_obj.tipper_err[ntx, 0, 0],
**kw_xx)
erty = mtplottools.plot_errorbar(axtyr,
period[nty],
t_obj.tipper[nty, 0, 1].real,
t_obj.tipper_err[nty, 0, 1],
**kw_yy)
eptx = mtplottools.plot_errorbar(axtxi,
period[ntx],
t_obj.tipper[ntx, 0, 0].imag,
t_obj.tipper_err[ntx, 0, 0],
**kw_xx)
epty = mtplottools.plot_errorbar(axtyi,
period[nty],
t_obj.tipper[nty, 0, 1].imag,
t_obj.tipper_err[nty, 0, 1],
**kw_yy)
#----------------------------------------------
# get error bar list for editing later
if self.plot_tipper == False:
try:
self._err_list = [[erxx[1][0], erxx[1][1], erxx[2][0]],
[erxy[1][0], erxy[1][1], erxy[2][0]],
[eryx[1][0], eryx[1][1], eryx[2][0]],
[eryy[1][0], eryy[1][1], eryy[2][0]]]
line_list = [[erxx[0]], [erxy[0]], [eryx[0]], [eryy[0]]]
except IndexError:
print 'Found no Z components for {0}'.format(self.station)
line_list = [[None], [None],
[None], [None]]
self._err_list = [[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None]]
else:
try:
line_list = [[erxx[0]], [erxy[0]],
[eryx[0]], [eryy[0]],
[ertx[0]], [erty[0]]]
self._err_list = [[erxx[1][0], erxx[1][1], erxx[2][0]],
[erxy[1][0], erxy[1][1], erxy[2][0]],
[eryx[1][0], eryx[1][1], eryx[2][0]],
[eryy[1][0], eryy[1][1], eryy[2][0]],
[ertx[1][0], ertx[1][1], ertx[2][0]],
[erty[1][0], erty[1][1], erty[2][0]]]
except IndexError:
print 'Found no Z components for {0}'.format(station)
line_list = [[None], [None],
[None], [None],
[None], [None]]
self._err_list = [[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None],
[None, None, None]]
#------------------------------------------
# make things look nice
# set titles of the Z components
label_list = [['$Z_{xx}$'], ['$Z_{xy}$'],
['$Z_{yx}$'], ['$Z_{yy}$']]
for ax, label in zip(self.ax_list[0:4], label_list):
ax.set_title(label[0],fontdict={'size':self.font_size+2,
'weight':'bold'})
# set legends for tipper components
# fake a line
l1 = plt.Line2D([0], [0], linewidth=0, color='w', linestyle='None',
marker='.')
t_label_list = ['Re{$T_x$}', 'Im{$T_x$}', 'Re{$T_y$}', 'Im{$T_y$}']
label_list += [['$T_{x}$'], ['$T_{y}$']]
for ax, label in zip(self.ax_list[-4:], t_label_list):
ax.legend([l1], [label], loc='upper left',
markerscale=.01,
borderaxespad=.05,
labelspacing=.01,
handletextpad=.05,
borderpad=.05,
prop={'size':max([self.font_size, 6])})
#set axis properties
for aa, ax in enumerate(self.ax_list):
ax.tick_params(axis='y', pad=self.ylabel_pad)
if aa < 8:
# ylabels[-1] = ''
# ylabels[0] = ''
# ax.set_yticklabels(ylabels)
# plt.setp(ax.get_xticklabels(), visible=False)
if self.plot_z == True:
ax.set_yscale('log')
else:
ax.set_xlabel('Period (s)', fontdict=fontdict)
if aa < 4 and self.plot_z is False:
ax.set_yscale('log')
if aa == 0 or aa == 3:
ax.set_ylim(self.res_limits_d)
elif aa == 1 or aa == 2:
ax.set_ylim(self.res_limits_od)
if aa > 3 and aa < 8 and self.plot_z is False:
ax.yaxis.set_major_formatter(MultipleLocator(10))
if self.phase_limits_d is not None:
ax.set_ylim(self.phase_limits_d)
#set axes labels
if aa == 0:
if self.plot_z == False:
ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Re[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 4:
if self.plot_z == False:
ax.set_ylabel('Phase (deg)',
fontdict=fontdict)
elif self.plot_z == True:
ax.set_ylabel('Im[Z (mV/km nT)]',
fontdict=fontdict)
elif aa == 8:
ax.set_ylabel('Tipper',
fontdict=fontdict)
if aa > 7:
ax.yaxis.set_major_locator(MultipleLocator(.1))
if self.tipper_limits is not None:
ax.set_ylim(self.tipper_limits)
else:
pass
ax.set_xscale('log')
ax.set_xlim(xmin=10**(np.floor(np.log10(period[0])))*1.01,
xmax=10**(np.ceil(np.log10(period[-1])))*.99)
ax.grid(True, alpha=.25)
ylabels = ax.get_yticks().tolist()
if aa < 8:
ylabels[-1] = ''
ylabels[0] = ''
ax.set_yticklabels(ylabels)
plt.setp(ax.get_xticklabels(), visible=False)
##----------------------------------------------
#plot model response
if self.resp_object is not None:
for resp_obj in self.resp_object:
resp_z_obj = resp_obj.mt_dict[station].Z
resp_z_err = np.nan_to_num((z_obj.z-resp_z_obj.z)/z_obj.z_err)
resp_z_obj._compute_res_phase()
resp_t_obj = resp_obj.mt_dict[station].Tipper
resp_t_err = np.nan_to_num((t_obj.tipper-resp_t_obj.tipper)/t_obj.tipper_err)
#convert to apparent resistivity and phase
if self.plot_z == True:
scaling = np.zeros_like(resp_z_obj.z)
for ii in range(2):
for jj in range(2):
scaling[:, ii, jj] = 1./np.sqrt(resp_z_obj.freq)
r_plot_res = abs(resp_z_obj.z.real*scaling)
r_plot_phase = abs(resp_z_obj.z.imag*scaling)
elif self.plot_z == False:
r_plot_res = resp_z_obj.resistivity
r_plot_phase = resp_z_obj.phase
rms_xx = resp_z_err[:, 0, 0].std()
rms_xy = resp_z_err[:, 0, 1].std()
rms_yx = resp_z_err[:, 1, 0].std()
rms_yy = resp_z_err[:, 1, 1].std()
#--> make key word dictionaries for plotting
kw_xx = {'color':self.ctem,
'marker':self.mtem,
'ms':self.ms_r,
'ls':':',
'lw':self.lw_r,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
kw_yy = {'color':self.ctmm,
'marker':self.mtmm,
'ms':self.ms_r,
'ls':':',
'lw':self.lw_r,
'e_capsize':self.e_capsize,
'e_capthick':self.e_capthick}
# plot data response
rerxx = mtplottools.plot_errorbar(axrxx,
period[nzxx],
r_plot_res[nzxx, 0, 0],
None,
**kw_xx)
rerxy = mtplottools.plot_errorbar(axrxy,
period[nzxy],
r_plot_res[nzxy, 0, 1],
None,
**kw_xx)
reryx = mtplottools.plot_errorbar(axryx,
period[nzyx],
r_plot_res[nzyx, 1, 0],
None,
**kw_yy)
reryy = mtplottools.plot_errorbar(axryy,
period[nzyy],
r_plot_res[nzyy, 1, 1],
None,
**kw_yy)
#plot phase
repxx = mtplottools.plot_errorbar(axpxx,
period[nzxx],
r_plot_phase[nzxx, 0, 0],
None,
**kw_xx)
repxy = mtplottools.plot_errorbar(axpxy,
period[nzxy],
r_plot_phase[nzxy, 0, 1],
None,
**kw_xx)
repyx = mtplottools.plot_errorbar(axpyx,
period[nzyx],
r_plot_phase[nzyx, 1, 0],
None,
**kw_yy)
repyy = mtplottools.plot_errorbar(axpyy,
period[nzyy],
r_plot_phase[nzyy, 1, 1],
None,
**kw_yy)
#plot tipper
if self.plot_tipper == True:
rertx = mtplottools.plot_errorbar(axtxr,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].real,
None,
**kw_xx)
rerty = mtplottools.plot_errorbar(axtyr,
period[nty],
resp_t_obj.tipper[nty, 0, 1].real,
None,
**kw_yy)
reptx = mtplottools.plot_errorbar(axtxi,
period[ntx],
resp_t_obj.tipper[ntx, 0, 0].imag,
None,
**kw_xx)
repty = mtplottools.plot_errorbar(axtyi,
period[nty],
resp_t_obj.tipper[nty, 0, 1].imag,
None,
**kw_yy)
if self.plot_tipper == False:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
else:
line_list[0] += [rerxx[0]]
line_list[1] += [rerxy[0]]
line_list[2] += [reryx[0]]
line_list[3] += [reryy[0]]
line_list[4] += [rertx[0]]
line_list[5] += [rerty[0]]
label_list[0] += ['$Z^m_{xx}$ '+
'rms={0:.2f}'.format(rms_xx)]
label_list[1] += ['$Z^m_{xy}$ '+
'rms={0:.2f}'.format(rms_xy)]
label_list[2] += ['$Z^m_{yx}$ '+
'rms={0:.2f}'.format(rms_yx)]
label_list[3] += ['$Z^m_{yy}$ '+
'rms={0:.2f}'.format(rms_yy)]
label_list[4] += ['$T^m_{x}$ '+
'rms={0:.2f}'.format(resp_t_err[:, 0, 0].std())]
label_list[5] += ['$T^m_{y}$'+
'rms={0:.2f}'.format(resp_t_err[:, 0, 1].std())]
legend_ax_list = self.ax_list[0:4]
# if self.plot_tipper == True:
# legend_ax_list += [self.ax_list[-4], self.ax_list[-2]]
for aa, ax in enumerate(legend_ax_list):
ax.legend(line_list[aa],
label_list[aa],
loc=self.legend_loc,
bbox_to_anchor=self.legend_pos,
markerscale=self.legend_marker_scale,
borderaxespad=self.legend_border_axes_pad,
labelspacing=self.legend_label_spacing,
handletextpad=self.legend_handle_text_pad,
borderpad=self.legend_border_pad,
prop={'size':max([self.font_size, 5])})
plt.show()
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_fn, file_format='pdf', orientation='portrait',
fig_dpi=None, close_fig='y'):
"""
save_plot will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
fig = plt.gcf()
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_fn) == False:
file_format = save_fn[-3:]
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
else:
save_fn = os.path.join(save_fn, '_L2.'+
file_format)
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.clf()
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
def update_plot(self):
"""
update any parameters that where changed using the built-in draw from
canvas.
Use this if you change an of the .fig or axes properties
:Example: ::
>>> # to change the grid lines to only be on the major ticks
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotAllResponses()
>>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
>>> ps1.update_plot()
"""
self.fig.canvas.draw()
def __str__(self):
"""
rewrite the string builtin to give a useful message
"""
return ("Plots data vs model response computed by WS3DINV")
#==============================================================================
# plot phase tensors
#==============================================================================
class PlotPTMaps(mtplottools.MTEllipse):
"""
Plot phase tensor maps including residual pt if response file is input.
:Plot only data for one period: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, plot_period_list=[0])
:Plot data and model response: ::
>>> import mtpy.modeling.ws3dinv as ws
>>> dfn = r"/home/MT/ws3dinv/Inv1/WSDataFile.dat"
>>> rfn = r"/home/MT/ws3dinv/Inv1/Test_resp.00"
>>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
>>> ptm = ws.PlotPTMaps(data_fn=dfn, resp_fn=rfn, model_fn=mfn,
>>> ... plot_period_list=[0])
>>> # adjust colorbar
>>> ptm.cb_res_pad = 1.25
>>> ptm.redraw_plot()
========================== ================================================
Attributes Description
========================== ================================================
cb_pt_pad percentage from top of axes to place pt
color bar. *default* is .90
cb_res_pad percentage from bottom of axes to place
resistivity color bar. *default* is 1.2
cb_residual_tick_step tick step for residual pt. *default* is 3
cb_tick_step tick step for phase tensor color bar,
*default* is 45
data np.ndarray(n_station, n_periods, 2, 2)
impedance tensors for station data
data_fn full path to data file
dscale scaling parameter depending on map_scale
ellipse_cmap color map for pt ellipses. *default* is
mt_bl2gr2rd
ellipse_colorby [ 'skew' | 'skew_seg' | 'phimin' | 'phimax'|
'phidet' | 'ellipticity' ] parameter to color
ellipses by. *default* is 'phimin'
ellipse_range (min, max, step) min and max of colormap, need
to input step if plotting skew_seg
ellipse_size relative size of ellipses in map_scale
ew_limits limits of plot in e-w direction in map_scale
units. *default* is None, scales to station
area
fig_aspect aspect of figure. *default* is 1
fig_dpi resolution in dots-per-inch. *default* is 300
fig_list list of matplotlib.figure instances for each
figure plotted.
fig_size [width, height] in inches of figure window
*default* is [6, 6]
font_size font size of ticklabels, axes labels are
font_size+2. *default* is 7
grid_east relative location of grid nodes in e-w direction
in map_scale units
grid_north relative location of grid nodes in n-s direction
in map_scale units
grid_z relative location of grid nodes in z direction
in map_scale units
model_fn full path to initial file
map_scale [ 'km' | 'm' ] distance units of map.
*default* is km
mesh_east np.meshgrid(grid_east, grid_north, indexing='ij')
mesh_north np.meshgrid(grid_east, grid_north, indexing='ij')
model_fn full path to model file
nodes_east relative distance between nodes in e-w direction
in map_scale units
nodes_north relative distance between nodes in n-s direction
in map_scale units
nodes_z relative distance between nodes in z direction
in map_scale units
ns_limits (min, max) limits of plot in n-s direction
*default* is None, viewing area is station area
pad_east padding from extreme stations in east direction
pad_north padding from extreme stations in north direction
period_list list of periods from data
plot_grid [ 'y' | 'n' ] 'y' to plot grid lines
*default* is 'n'
plot_period_list list of period index values to plot
*default* is None
plot_yn ['y' | 'n' ] 'y' to plot on instantiation
*default* is 'y'
res_cmap colormap for resistivity values.
*default* is 'jet_r'
res_limits (min, max) resistivity limits in log scale
*default* is (0, 4)
res_model np.ndarray(n_north, n_east, n_vertical) of
model resistivity values in linear scale
residual_cmap color map for pt residuals.
*default* is 'mt_wh2or'
resp np.ndarray(n_stations, n_periods, 2, 2)
impedance tensors for model response
resp_fn full path to response file
save_path directory to save figures to
save_plots [ 'y' | 'n' ] 'y' to save plots to save_path
station_east location of stations in east direction in
map_scale units
station_fn full path to station locations file
station_names station names
station_north location of station in north direction in
map_scale units
subplot_bottom distance between axes and bottom of figure window
subplot_left distance between axes and left of figure window
subplot_right distance between axes and right of figure window
subplot_top distance between axes and top of figure window
title title of plot *default* is depth of slice
xminorticks location of xminorticks
yminorticks location of yminorticks
========================== ================================================
"""
def __init__(self, data_fn=None, resp_fn=None, model_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.resp_fn = resp_fn
self.save_path = kwargs.pop('save_path', None)
if self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
elif self.model_fn is not None and self.save_path is None:
self.save_path = os.path.dirname(self.model_fn)
if self.save_path is not None:
if not os.path.exists(self.save_path):
os.mkdir(self.save_path)
self.save_plots = kwargs.pop('save_plots', 'y')
self.plot_period_list = kwargs.pop('plot_period_list', None)
self.period_dict = None
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale == 'km':
self.dscale = 1000.
elif self.map_scale == 'm':
self.dscale = 1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.pad_east = kwargs.pop('pad_east', 2000)
self.pad_north = kwargs.pop('pad_north', 2000)
self.plot_grid = kwargs.pop('plot_grid', 'n')
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.fig_list = []
self.xminorticks = kwargs.pop('xminorticks', 1000)
self.yminorticks = kwargs.pop('yminorticks', 1000)
self.residual_cmap = kwargs.pop('residual_cmap', 'mt_wh2or')
self.font_size = kwargs.pop('font_size', 7)
self.cb_tick_step = kwargs.pop('cb_tick_step', 45)
self.cb_residual_tick_step = kwargs.pop('cb_residual_tick_step', 3)
self.cb_pt_pad = kwargs.pop('cb_pt_pad', 1.2)
self.cb_res_pad = kwargs.pop('cb_res_pad', .5)
self.res_limits = kwargs.pop('res_limits', (0,4))
self.res_cmap = kwargs.pop('res_cmap', 'jet_r')
#--> set the ellipse properties -------------------
self._ellipse_dict = kwargs.pop('ellipse_dict', {'size':2})
self._read_ellipse_dict()
self.subplot_right = .99
self.subplot_left = .085
self.subplot_top = .92
self.subplot_bottom = .1
self.subplot_hspace = .2
self.subplot_wspace = .05
self.data_obj = None
self.resp_obj = None
self.model_obj = None
self.period_list = None
self.pt_data_arr = None
self.pt_resp_arr = None
self.pt_resid_arr = None
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def _read_files(self):
"""
get information from files
"""
#--> read in data file
self.data_obj = Data()
self.data_obj.read_data_file(self.data_fn)
#--> read response file
if self.resp_fn is not None:
self.resp_obj = Data()
self.resp_obj.read_data_file(self.resp_fn)
#--> read mode file
if self.model_fn is not None:
self.model_obj = Model()
self.model_obj.read_model_file(self.model_fn)
self._get_plot_period_list()
self._get_pt()
def _get_plot_period_list(self):
"""
get periods to plot from input or data file
"""
#--> get period list to plot
if self.plot_period_list is None:
self.plot_period_list = self.data_obj.period_list
else:
if type(self.plot_period_list) is list:
#check if entries are index values or actual periods
if type(self.plot_period_list[0]) is int:
self.plot_period_list = [self.data_obj.period_list[ii]
for ii in self.plot_period_list]
else:
pass
elif type(self.plot_period_list) is int:
self.plot_period_list = self.data_obj.period_list[self.plot_period_list]
elif type(self.plot_period_list) is float:
self.plot_period_list = [self.plot_period_list]
self.period_dict = dict([(key, value) for value, key in
enumerate(self.data_obj.period_list)])
def _get_pt(self):
"""
put pt parameters into something useful for plotting
"""
ns = len(self.data_obj.mt_dict.keys())
nf = len(self.data_obj.period_list)
data_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float)])
if self.resp_fn is not None:
model_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float)])
res_pt_arr = np.zeros((nf, ns), dtype=[('phimin', np.float),
('phimax', np.float),
('skew', np.float),
('azimuth', np.float),
('east', np.float),
('north', np.float),
('geometric_mean', np.float)])
for ii, key in enumerate(self.data_obj.mt_dict.keys()):
east = self.data_obj.mt_dict[key].grid_east/self.dscale
north = self.data_obj.mt_dict[key].grid_north/self.dscale
dpt = self.data_obj.mt_dict[key].pt
data_pt_arr[:, ii]['east'] = east
data_pt_arr[:, ii]['north'] = north
data_pt_arr[:, ii]['phimin'] = dpt.phimin[0]
data_pt_arr[:, ii]['phimax'] = dpt.phimax[0]
data_pt_arr[:, ii]['azimuth'] = dpt.azimuth[0]
data_pt_arr[:, ii]['skew'] = dpt.beta[0]
if self.resp_fn is not None:
mpt = self.resp_obj.mt_dict[key].pt
try:
rpt = mtpt.ResidualPhaseTensor(pt_object1=dpt,
pt_object2=mpt)
rpt = rpt.residual_pt
res_pt_arr[:, ii]['east'] = east
res_pt_arr[:, ii]['north'] = north
res_pt_arr[:, ii]['phimin'] = rpt.phimin[0]
res_pt_arr[:, ii]['phimax'] = rpt.phimax[0]
res_pt_arr[:, ii]['azimuth'] = rpt.azimuth[0]
res_pt_arr[:, ii]['skew'] = rpt.beta[0]
res_pt_arr[:, ii]['geometric_mean'] = np.sqrt(abs(rpt.phimin[0]*\
rpt.phimax[0]))
except mtex.MTpyError_PT:
print key, dpt.pt.shape, mpt.pt.shape
model_pt_arr[:, ii]['east'] = east
model_pt_arr[:, ii]['north'] = north
model_pt_arr[:, ii]['phimin'] = mpt.phimin[0]
model_pt_arr[:, ii]['phimax'] = mpt.phimax[0]
model_pt_arr[:, ii]['azimuth'] = mpt.azimuth[0]
model_pt_arr[:, ii]['skew'] = mpt.beta[0]
#make these attributes
self.pt_data_arr = data_pt_arr
if self.resp_fn is not None:
self.pt_resp_arr = model_pt_arr
self.pt_resid_arr = res_pt_arr
    def plot(self):
        """
        Plot phase tensor maps for data and or response, each figure is of a
        different period.  If response is input a third column is added which is
        the residual phase tensor showing where the model is not fitting the data
        well.  The data is plotted in km.

        One figure is produced per entry in ``plot_period_list`` and appended
        to ``fig_list``.  Input files are read lazily on the first call.
        """
        #--> read in data first
        if self.data_obj is None:
            self._read_files()

        # set plot properties
        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top
        font_dict = {'size':self.font_size+2, 'weight':'bold'}

        # make a grid of subplots: data | model | residual
        gs = gridspec.GridSpec(1, 3, hspace=self.subplot_hspace,
                               wspace=self.subplot_wspace)

        #set some parameters for the colorbar
        ckmin = float(self.ellipse_range[0])
        ckmax = float(self.ellipse_range[1])
        try:
            ckstep = float(self.ellipse_range[2])
        except IndexError:
            # a step is only mandatory for the segmented colormap
            if self.ellipse_cmap == 'mt_seg_bl2wh2rd':
                raise ValueError('Need to input range as (min, max, step)')
            else:
                ckstep = 3
        bounds = np.arange(ckmin, ckmax+ckstep, ckstep)

        # set plot limits to be the station area
        if self.ew_limits == None:
            east_min = self.data_obj.data_array['rel_east'].min()-\
                       self.pad_east
            east_max = self.data_obj.data_array['rel_east'].max()+\
                       self.pad_east
            self.ew_limits = (east_min/self.dscale, east_max/self.dscale)

        if self.ns_limits == None:
            north_min = self.data_obj.data_array['rel_north'].min()-\
                        self.pad_north
            north_max = self.data_obj.data_array['rel_north'].max()+\
                        self.pad_north
            self.ns_limits = (north_min/self.dscale, north_max/self.dscale)

        #-------------plot phase tensors------------------------------------
        # one figure per requested period
        for ff, per in enumerate(self.plot_period_list):
            data_ii = self.period_dict[per]

            print 'Plotting Period: {0:.5g}'.format(per)
            # figure window title doubles as the period label (used later
            # by save_figure to build file names)
            fig = plt.figure('{0:.5g}'.format(per), figsize=self.fig_size,
                             dpi=self.fig_dpi)
            fig.clf()

            if self.resp_fn is not None:
                axd = fig.add_subplot(gs[0, 0], aspect='equal')
                axm = fig.add_subplot(gs[0, 1], aspect='equal')
                axr = fig.add_subplot(gs[0, 2], aspect='equal')
                ax_list = [axd, axm, axr]
            else:
                axd = fig.add_subplot(gs[0, :], aspect='equal')
                ax_list = [axd]

            #plot model below the phase tensors
            if self.model_fn is not None:
                approx_depth, d_index = ws.estimate_skin_depth(self.model_obj.res_model.copy(),
                                                               self.model_obj.grid_z.copy()/self.dscale,
                                                               per,
                                                               dscale=self.dscale)
                #need to add an extra row and column to east and north to make sure
                #all is plotted see pcolor for details.
                plot_east = np.append(self.model_obj.grid_east,
                                      self.model_obj.grid_east[-1]*1.25)/\
                            self.dscale
                plot_north = np.append(self.model_obj.grid_north,
                                       self.model_obj.grid_north[-1]*1.25)/\
                             self.dscale

                #make a mesh grid for plotting
                #the 'ij' makes sure the resulting grid is in east, north
                self.mesh_east, self.mesh_north = np.meshgrid(plot_east,
                                                              plot_north,
                                                              indexing='ij')

                # resistivity slice (log10) at the skin-depth layer is drawn
                # under the ellipses in every panel
                for ax in ax_list:
                    plot_res = np.log10(self.model_obj.res_model[:, :, d_index].T)
                    ax.pcolormesh(self.mesh_east,
                                  self.mesh_north,
                                  plot_res,
                                  cmap=self.res_cmap,
                                  vmin=self.res_limits[0],
                                  vmax=self.res_limits[1])

            #--> plot data phase tensors
            for pt in self.pt_data_arr[data_ii]:
                # ellipse axes are normalized by the period's largest phimax
                # so sizes are comparable within one panel
                eheight = pt['phimin']/\
                          self.pt_data_arr[data_ii]['phimax'].max()*\
                          self.ellipse_size
                ewidth = pt['phimax']/\
                         self.pt_data_arr[data_ii]['phimax'].max()*\
                         self.ellipse_size

                ellipse = Ellipse((pt['east'],
                                   pt['north']),
                                  width=ewidth,
                                  height=eheight,
                                  angle=90-pt['azimuth'])

                #get ellipse color
                if self.ellipse_cmap.find('seg')>0:
                    ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
                                                              self.ellipse_colorby,
                                                              self.ellipse_cmap,
                                                              ckmin,
                                                              ckmax,
                                                              bounds=bounds))
                else:
                    ellipse.set_facecolor(mtcl.get_plot_color(pt[self.ellipse_colorby],
                                                              self.ellipse_colorby,
                                                              self.ellipse_cmap,
                                                              ckmin,
                                                              ckmax))

                axd.add_artist(ellipse)

            #-----------plot response phase tensors---------------
            if self.resp_fn is not None:
                rcmin = np.floor(self.pt_resid_arr['geometric_mean'].min())
                # NOTE(review): floor on the max looks like it may have been
                # intended as ceil — confirm before changing
                rcmax = np.floor(self.pt_resid_arr['geometric_mean'].max())
                for mpt, rpt in zip(self.pt_resp_arr[data_ii],
                                    self.pt_resid_arr[data_ii]):
                    eheight = mpt['phimin']/\
                              self.pt_resp_arr[data_ii]['phimax'].max()*\
                              self.ellipse_size
                    ewidth = mpt['phimax']/\
                             self.pt_resp_arr[data_ii]['phimax'].max()*\
                             self.ellipse_size

                    ellipsem = Ellipse((mpt['east'],
                                        mpt['north']),
                                       width=ewidth,
                                       height=eheight,
                                       angle=90-mpt['azimuth'])

                    #get ellipse color
                    if self.ellipse_cmap.find('seg')>0:
                        ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
                                                                   self.ellipse_colorby,
                                                                   self.ellipse_cmap,
                                                                   ckmin,
                                                                   ckmax,
                                                                   bounds=bounds))
                    else:
                        ellipsem.set_facecolor(mtcl.get_plot_color(mpt[self.ellipse_colorby],
                                                                   self.ellipse_colorby,
                                                                   self.ellipse_cmap,
                                                                   ckmin,
                                                                   ckmax))

                    axm.add_artist(ellipsem)

                    #-----------plot residual phase tensors---------------
                    eheight = rpt['phimin']/\
                              self.pt_resid_arr[data_ii]['phimax'].max()*\
                              self.ellipse_size
                    ewidth = rpt['phimax']/\
                             self.pt_resid_arr[data_ii]['phimax'].max()*\
                             self.ellipse_size

                    ellipser = Ellipse((rpt['east'],
                                        rpt['north']),
                                       width=ewidth,
                                       height=eheight,
                                       angle=rpt['azimuth'])

                    #get ellipse color: residuals are colored by the
                    #geometric mean of phimin and phimax
                    rpt_color = np.sqrt(abs(rpt['phimin']*rpt['phimax']))
                    if self.ellipse_cmap.find('seg')>0:
                        ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
                                                                   'geometric_mean',
                                                                   self.residual_cmap,
                                                                   ckmin,
                                                                   ckmax,
                                                                   bounds=bounds))
                    else:
                        ellipser.set_facecolor(mtcl.get_plot_color(rpt_color,
                                                                   'geometric_mean',
                                                                   self.residual_cmap,
                                                                   ckmin,
                                                                   ckmax))

                    axr.add_artist(ellipser)

            #--> set axes properties
            # data
            axd.set_xlim(self.ew_limits)
            axd.set_ylim(self.ns_limits)
            axd.set_xlabel('Easting ({0})'.format(self.map_scale),
                           fontdict=font_dict)
            axd.set_ylabel('Northing ({0})'.format(self.map_scale),
                           fontdict=font_dict)
            #make a colorbar for phase tensors
            #bb = axd.axes.get_position().bounds
            bb = axd.get_position().bounds
            # colorbar position scales with the panel aspect ratio
            y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
                      (self.ew_limits[1]-self.ew_limits[0]))
            cb_location = (3.35*bb[2]/5+bb[0],
                           y1*self.cb_pt_pad, .295*bb[2], .02)
            cbaxd = fig.add_axes(cb_location)
            cbd = mcb.ColorbarBase(cbaxd,
                                   cmap=mtcl.cmapdict[self.ellipse_cmap],
                                   norm=Normalize(vmin=ckmin,
                                                  vmax=ckmax),
                                   orientation='horizontal')
            cbd.ax.xaxis.set_label_position('top')
            cbd.ax.xaxis.set_label_coords(.5, 1.75)
            cbd.set_label(mtplottools.ckdict[self.ellipse_colorby])
            cbd.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
                                    self.cb_tick_step))

            axd.text(self.ew_limits[0]*.95,
                     self.ns_limits[1]*.95,
                     'Data',
                     horizontalalignment='left',
                     verticalalignment='top',
                     bbox={'facecolor':'white'},
                     fontdict={'size':self.font_size+1})

            #Model and residual
            if self.resp_fn is not None:
                for aa, ax in enumerate([axm, axr]):
                    ax.set_xlim(self.ew_limits)
                    ax.set_ylim(self.ns_limits)
                    ax.set_xlabel('Easting ({0})'.format(self.map_scale),
                                  fontdict=font_dict)
                    plt.setp(ax.yaxis.get_ticklabels(), visible=False)
                    #make a colorbar ontop of axis
                    bb = ax.axes.get_position().bounds
                    y1 = .25*(2+(self.ns_limits[1]-self.ns_limits[0])/
                              (self.ew_limits[1]-self.ew_limits[0]))
                    cb_location = (3.35*bb[2]/5+bb[0],
                                   y1*self.cb_pt_pad, .295*bb[2], .02)
                    cbax = fig.add_axes(cb_location)
                    if aa == 0:
                        # model panel shares the phase-tensor colormap
                        cb = mcb.ColorbarBase(cbax,
                                              cmap=mtcl.cmapdict[self.ellipse_cmap],
                                              norm=Normalize(vmin=ckmin,
                                                             vmax=ckmax),
                                              orientation='horizontal')
                        cb.ax.xaxis.set_label_position('top')
                        cb.ax.xaxis.set_label_coords(.5, 1.75)
                        cb.set_label(mtplottools.ckdict[self.ellipse_colorby])
                        cb.set_ticks(np.arange(ckmin, ckmax+self.cb_tick_step,
                                               self.cb_tick_step))
                        ax.text(self.ew_limits[0]*.95,
                                self.ns_limits[1]*.95,
                                'Model',
                                horizontalalignment='left',
                                verticalalignment='top',
                                bbox={'facecolor':'white'},
                                fontdict={'size':self.font_size+1})
                    else:
                        # residual panel uses its own colormap and range
                        cb = mcb.ColorbarBase(cbax,
                                              cmap=mtcl.cmapdict[self.residual_cmap],
                                              norm=Normalize(vmin=rcmin,
                                                             vmax=rcmax),
                                              orientation='horizontal')
                        cb.ax.xaxis.set_label_position('top')
                        cb.ax.xaxis.set_label_coords(.5, 1.75)
                        cb.set_label(r"$\sqrt{\Phi_{min} \Phi_{max}}$")
                        cb_ticks = [rcmin, (rcmax-rcmin)/2, rcmax]
                        cb.set_ticks(cb_ticks)
                        ax.text(self.ew_limits[0]*.95,
                                self.ns_limits[1]*.95,
                                'Residual',
                                horizontalalignment='left',
                                verticalalignment='top',
                                bbox={'facecolor':'white'},
                                fontdict={'size':self.font_size+1})

            # resistivity colorbar for every panel drawn over the model
            if self.model_fn is not None:
                for ax in ax_list:
                    ax.tick_params(direction='out')
                    bb = ax.axes.get_position().bounds
                    y1 = .25*(2-(self.ns_limits[1]-self.ns_limits[0])/
                              (self.ew_limits[1]-self.ew_limits[0]))
                    cb_position = (3.0*bb[2]/5+bb[0],
                                   y1*self.cb_res_pad, .35*bb[2], .02)
                    cbax = fig.add_axes(cb_position)
                    cb = mcb.ColorbarBase(cbax,
                                          cmap=self.res_cmap,
                                          norm=Normalize(vmin=self.res_limits[0],
                                                         vmax=self.res_limits[1]),
                                          orientation='horizontal')
                    cb.ax.xaxis.set_label_position('top')
                    cb.ax.xaxis.set_label_coords(.5, 1.5)
                    cb.set_label('Resistivity ($\Omega \cdot$m)')
                    cb_ticks = np.arange(np.floor(self.res_limits[0]),
                                         np.ceil(self.res_limits[1]+1), 1)
                    cb.set_ticks(cb_ticks)
                    cb.set_ticklabels([mtplottools.labeldict[ctk] for ctk in cb_ticks])

            plt.show()
            self.fig_list.append(fig)
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
for fig in self.fig_list:
plt.close(fig)
self.plot()
def save_figure(self, save_path=None, fig_dpi=None, file_format='pdf',
orientation='landscape', close_fig='y'):
"""
save_figure will save the figure to save_fn.
Arguments:
-----------
**save_fn** : string
full path to save figure to, can be input as
* directory path -> the directory path to save to
in which the file will be saved as
save_fn/station_name_PhaseTensor.file_format
* full path -> file will be save to the given
path. If you use this option then the format
will be assumed to be provided by the path
**file_format** : [ pdf | eps | jpg | png | svg ]
file type of saved figure pdf,svg,eps...
**orientation** : [ landscape | portrait ]
orientation in which the file will be saved
*default* is portrait
**fig_dpi** : int
The resolution in dots-per-inch the file will be
saved. If None then the dpi will be that at
which the figure was made. I don't think that
it can be larger than dpi of the figure.
**close_plot** : [ y | n ]
* 'y' will close the plot after saving.
* 'n' will leave plot open
:Example: ::
>>> # to save plot as jpg
>>> import mtpy.modeling.occam2d as occam2d
>>> dfn = r"/home/occam2d/Inv1/data.dat"
>>> ocd = occam2d.Occam2DData(dfn)
>>> ps1 = ocd.plotPseudoSection()
>>> ps1.save_plot(r'/home/MT/figures', file_format='jpg')
"""
if fig_dpi == None:
fig_dpi = self.fig_dpi
if os.path.isdir(save_path) == False:
try:
os.mkdir(save_path)
except:
raise IOError('Need to input a correct directory path')
for fig in self.fig_list:
per = fig.canvas.get_window_title()
save_fn = os.path.join(save_path, 'PT_DepthSlice_{0}s.{1}'.format(
per, file_format))
fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
orientation=orientation, bbox_inches='tight')
if close_fig == 'y':
plt.close(fig)
else:
pass
self.fig_fn = save_fn
print 'Saved figure to: '+self.fig_fn
#==============================================================================
# plot depth slices
#==============================================================================
class PlotDepthSlice(object):
    """
    Plots depth slices of resistivity model

    :Example: ::

        >>> import mtpy.modeling.ws3dinv as ws
        >>> mfn = r"/home/MT/ws3dinv/Inv1/Test_model.00"
        >>> sfn = r"/home/MT/ws3dinv/Inv1/WSStationLocations.txt"
        >>> # plot just first layer to check the formating
        >>> pds = ws.PlotDepthSlice(model_fn=mfn, station_fn=sfn,
        >>> ...                     depth_index=0, save_plots='n')
        >>> #move color bar up
        >>> pds.cb_location
        >>> (0.64500000000000002, 0.14999999999999997, 0.3, 0.025)
        >>> pds.cb_location = (.645, .175, .3, .025)
        >>> pds.redraw_plot()
        >>> #looks good now plot all depth slices and save them to a folder
        >>> pds.save_path = r"/home/MT/ws3dinv/Inv1/DepthSlices"
        >>> pds.depth_index = None
        >>> pds.save_plots = 'y'
        >>> pds.redraw_plot()

    ======================= ===================================================
    Attributes              Description
    ======================= ===================================================
    cb_location             location of color bar (x, y, width, height)
                            *default* is None, automatically locates
    cb_orientation          [ 'vertical' | 'horizontal' ]
                            *default* is horizontal
    cb_pad                  padding between axes and colorbar
                            *default* is None
    cb_shrink               percentage to shrink colorbar by
                            *default* is None
    climits                 (min, max) of resistivity color on log scale
                            *default* is (0, 4)
    cmap                    name of color map *default* is 'jet_r'
    data_fn                 full path to data file
    depth_index             integer value of depth slice index, shallowest
                            layer is 0
    dscale                  scaling parameter depending on map_scale
    ew_limits               (min, max) plot limits in e-w direction in
                            map_scale units. *default* is None, sets viewing
                            area to the station area
    fig_aspect              aspect ratio of plot. *default* is 1
    fig_dpi                 resolution of figure in dots-per-inch. *default* is
                            300
    fig_list                list of matplotlib.figure instances for each
                            depth slice
    fig_size                [width, height] in inches of figure size
                            *default* is [6, 6]
    font_size               size of ticklabel font in points, labels are
                            font_size+2. *default* is 7
    grid_east               relative location of grid nodes in e-w direction
                            in map_scale units
    grid_north              relative location of grid nodes in n-s direction
                            in map_scale units
    grid_z                  relative location of grid nodes in z direction
                            in map_scale units
    initial_fn              full path to initial file
    map_scale               [ 'km' | 'm' ] distance units of map. *default* is
                            km
    mesh_east               np.meshgrid(grid_east, grid_north, indexing='ij')
    mesh_north              np.meshgrid(grid_east, grid_north, indexing='ij')
    model_fn                full path to model file
    nodes_east              relative distance betwen nodes in e-w direction
                            in map_scale units
    nodes_north             relative distance betwen nodes in n-s direction
                            in map_scale units
    nodes_z                 relative distance betwen nodes in z direction
                            in map_scale units
    ns_limits               (min, max) plot limits in n-s direction in
                            map_scale units. *default* is None, sets viewing
                            area to the station area
    plot_grid               [ 'y' | 'n' ] 'y' to plot mesh grid lines.
                            *default* is 'n'
    plot_yn                 [ 'y' | 'n' ] 'y' to plot on instantiation
    res_model               np.ndarray(n_north, n_east, n_vertical) of
                            model resistivity values in linear scale
    save_path               path to save figures to
    save_plots              [ 'y' | 'n' ] 'y' to save depth slices to save_path
    station_east            location of stations in east direction in
                            map_scale units
    station_fn              full path to station locations file
    station_names           station names
    station_north           location of station in north direction in
                            map_scale units
    subplot_bottom          distance between axes and bottom of figure window
    subplot_left            distance between axes and left of figure window
    subplot_right           distance between axes and right of figure window
    subplot_top             distance between axes and top of figure window
    title                   titiel of plot *default* is depth of slice
    xminorticks             location of xminorticks
    yminorticks             location of yminorticks
    ======================= ===================================================
    """

    def __init__(self, model_fn=None, data_fn=None, **kwargs):
        self.model_fn = model_fn
        self.data_fn = data_fn
        # BUG FIX: initial_fn was read below but never assigned, raising an
        # AttributeError whenever model_fn was None.  Define it explicitly.
        self.initial_fn = kwargs.pop('initial_fn', None)

        self.save_path = kwargs.pop('save_path', None)
        if self.model_fn is not None and self.save_path is None:
            self.save_path = os.path.dirname(self.model_fn)
        elif self.initial_fn is not None and self.save_path is None:
            self.save_path = os.path.dirname(self.initial_fn)

        if self.save_path is not None:
            if not os.path.exists(self.save_path):
                os.mkdir(self.save_path)

        self.save_plots = kwargs.pop('save_plots', 'y')

        self.depth_index = kwargs.pop('depth_index', None)
        self.map_scale = kwargs.pop('map_scale', 'km')
        # make map scale
        if self.map_scale == 'km':
            self.dscale = 1000.
        elif self.map_scale == 'm':
            self.dscale = 1.
        self.ew_limits = kwargs.pop('ew_limits', None)
        self.ns_limits = kwargs.pop('ns_limits', None)

        self.plot_grid = kwargs.pop('plot_grid', 'n')

        self.fig_size = kwargs.pop('fig_size', [6, 6])
        self.fig_dpi = kwargs.pop('dpi', 300)
        self.fig_aspect = kwargs.pop('fig_aspect', 1)
        self.title = kwargs.pop('title', 'on')
        self.fig_list = []

        self.xminorticks = kwargs.pop('xminorticks', 1000)
        self.yminorticks = kwargs.pop('yminorticks', 1000)

        self.climits = kwargs.pop('climits', (0, 4))
        self.cmap = kwargs.pop('cmap', 'jet_r')
        self.font_size = kwargs.pop('font_size', 8)

        self.cb_shrink = kwargs.pop('cb_shrink', .8)
        self.cb_pad = kwargs.pop('cb_pad', .01)
        self.cb_orientation = kwargs.pop('cb_orientation', 'horizontal')
        self.cb_location = kwargs.pop('cb_location', None)

        self.subplot_right = .99
        self.subplot_left = .085
        self.subplot_top = .92
        self.subplot_bottom = .1

        # attributes filled in by read_files()
        self.res_model = None
        self.grid_east = None
        self.grid_north = None
        self.grid_z = None

        self.nodes_east = None
        self.nodes_north = None
        self.nodes_z = None

        self.mesh_east = None
        self.mesh_north = None

        self.station_east = None
        self.station_north = None
        self.station_names = None

        self.plot_yn = kwargs.pop('plot_yn', 'y')
        if self.plot_yn == 'y':
            self.plot()

    def read_files(self):
        """
        read in the model and data files to get resistivity values, grid
        locations and station locations
        """
        #--> read in model file
        if self.model_fn is not None:
            if os.path.isfile(self.model_fn):
                md_model = Model()
                md_model.read_model_file(self.model_fn)
                self.res_model = md_model.res_model
                self.grid_east = md_model.grid_east/self.dscale
                self.grid_north = md_model.grid_north/self.dscale
                self.grid_z = md_model.grid_z/self.dscale
                self.nodes_east = md_model.nodes_east/self.dscale
                self.nodes_north = md_model.nodes_north/self.dscale
                self.nodes_z = md_model.nodes_z/self.dscale
            else:
                raise mtex.MTpyError_file_handling(
                        '{0} does not exist, check path'.format(self.model_fn))

        #--> read in data file to get station locations
        if self.data_fn is not None:
            if os.path.isfile(self.data_fn):
                md_data = Data()
                md_data.read_data_file(self.data_fn)
                self.station_east = md_data.station_locations.rel_east/self.dscale
                self.station_north = md_data.station_locations.rel_north/self.dscale
                self.station_elev = md_data.station_locations.elev/self.dscale
                self.station_names = md_data.station_locations.station
            else:
                print('Could not find data file {0}'.format(self.data_fn))

    def plot(self):
        """
        plot depth slices, one matplotlib figure per slice in
        self.depth_index (all slices if depth_index is None)
        """
        #--> get information from files
        self.read_files()

        fdict = {'size': self.font_size+2, 'weight': 'bold'}

        # map log10(resistivity) tick values to LaTeX tick labels
        cblabeldict = {-2: '$10^{-3}$', -1: '$10^{-1}$', 0: '$10^{0}$',
                       1: '$10^{1}$', 2: '$10^{2}$', 3: '$10^{3}$',
                       4: '$10^{4}$', 5: '$10^{5}$', 6: '$10^{6}$',
                       7: '$10^{7}$', 8: '$10^{8}$'}

        #create an list of depth slices to plot
        if self.depth_index is None:
            zrange = range(self.grid_z.shape[0])
        elif isinstance(self.depth_index, int):
            zrange = [self.depth_index]
        elif isinstance(self.depth_index, (list, np.ndarray)):
            zrange = self.depth_index

        #set the limits of the plot
        if self.ew_limits is None:
            if self.station_east is not None:
                xlimits = (np.floor(self.station_east.min()),
                           np.ceil(self.station_east.max()))
            else:
                xlimits = (self.grid_east[5], self.grid_east[-5])
        else:
            xlimits = self.ew_limits

        if self.ns_limits is None:
            if self.station_north is not None:
                ylimits = (np.floor(self.station_north.min()),
                           np.ceil(self.station_north.max()))
            else:
                ylimits = (self.grid_north[5], self.grid_north[-5])
        else:
            ylimits = self.ns_limits

        #make a mesh grid of north and east
        self.mesh_east, self.mesh_north = np.meshgrid(self.grid_east,
                                                      self.grid_north,
                                                      indexing='ij')

        plt.rcParams['font.size'] = self.font_size

        #--> plot depths into individual figures
        for ii in zrange:
            depth = '{0:.3f} ({1})'.format(self.grid_z[ii],
                                           self.map_scale)
            fig = plt.figure(depth, figsize=self.fig_size, dpi=self.fig_dpi)
            plt.clf()
            ax1 = fig.add_subplot(1, 1, 1, aspect=self.fig_aspect)
            # transpose so (east, north) matches the 'ij'-indexed mesh
            plot_res = np.log10(self.res_model[:, :, ii].T)
            mesh_plot = ax1.pcolormesh(self.mesh_east,
                                       self.mesh_north,
                                       plot_res,
                                       cmap=self.cmap,
                                       vmin=self.climits[0],
                                       vmax=self.climits[1])

            #plot the stations
            if self.station_east is not None:
                for ee, nn in zip(self.station_east, self.station_north):
                    ax1.text(ee, nn, '*',
                             verticalalignment='center',
                             horizontalalignment='center',
                             fontdict={'size': 5, 'weight': 'bold'})

            #set axis properties
            ax1.set_xlim(xlimits)
            ax1.set_ylim(ylimits)
            ax1.xaxis.set_minor_locator(MultipleLocator(self.xminorticks/self.dscale))
            ax1.yaxis.set_minor_locator(MultipleLocator(self.yminorticks/self.dscale))
            ax1.set_ylabel('Northing ('+self.map_scale+')', fontdict=fdict)
            ax1.set_xlabel('Easting ('+self.map_scale+')', fontdict=fdict)
            ax1.set_title('Depth = {0}'.format(depth), fontdict=fdict)

            #plot the grid if desired
            if self.plot_grid == 'y':
                # None entries break the line between grid segments so the
                # whole grid can be drawn with a single plot() call
                east_line_xlist = []
                east_line_ylist = []
                for xx in self.grid_east:
                    east_line_xlist.extend([xx, xx])
                    east_line_xlist.append(None)
                    east_line_ylist.extend([self.grid_north.min(),
                                            self.grid_north.max()])
                    east_line_ylist.append(None)
                ax1.plot(east_line_xlist,
                         east_line_ylist,
                         lw=.25,
                         color='k')

                north_line_xlist = []
                north_line_ylist = []
                for yy in self.grid_north:
                    north_line_xlist.extend([self.grid_east.min(),
                                             self.grid_east.max()])
                    north_line_xlist.append(None)
                    north_line_ylist.extend([yy, yy])
                    north_line_ylist.append(None)
                ax1.plot(north_line_xlist,
                         north_line_ylist,
                         lw=.25,
                         color='k')

            #plot the colorbar
            if self.cb_location is None:
                if self.cb_orientation == 'horizontal':
                    self.cb_location = (ax1.axes.figbox.bounds[3]-.225,
                                        ax1.axes.figbox.bounds[1]+.05, .3, .025)

                elif self.cb_orientation == 'vertical':
                    self.cb_location = ((ax1.axes.figbox.bounds[2]-.15,
                                         ax1.axes.figbox.bounds[3]-.21, .025, .3))

            ax2 = fig.add_axes(self.cb_location)

            cb = mcb.ColorbarBase(ax2,
                                  cmap=self.cmap,
                                  norm=Normalize(vmin=self.climits[0],
                                                 vmax=self.climits[1]),
                                  orientation=self.cb_orientation)

            if self.cb_orientation == 'horizontal':
                cb.ax.xaxis.set_label_position('top')
                cb.ax.xaxis.set_label_coords(.5, 1.3)

            elif self.cb_orientation == 'vertical':
                cb.ax.yaxis.set_label_position('right')
                cb.ax.yaxis.set_label_coords(1.25, .5)
                cb.ax.yaxis.tick_left()
                cb.ax.tick_params(axis='y', direction='in')

            cb.set_label(r'Resistivity ($\Omega \cdot$m)',
                         fontdict={'size': self.font_size+1})

            cb.set_ticks(np.arange(self.climits[0], self.climits[1]+1))
            cb.set_ticklabels([cblabeldict[cc]
                               for cc in np.arange(self.climits[0],
                                                   self.climits[1]+1)])

            self.fig_list.append(fig)

            #--> save plots to a common folder
            if self.save_plots == 'y':
                fig.savefig(os.path.join(self.save_path,
                            "Depth_{}_{:.4f}.png".format(ii, self.grid_z[ii])),
                            dpi=self.fig_dpi, bbox_inches='tight')
                fig.clear()
                plt.close()

    def redraw_plot(self):
        """
        redraw plot if parameters were changed

        use this function if you updated some attributes and want to re-plot.

        :Example: ::

            >>> # change the color and marker of the xy components
            >>> import mtpy.modeling.occam2d as occam2d
            >>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
            >>> p1 = ocd.plotAllResponses()
            >>> #change line width
            >>> p1.lw = 2
            >>> p1.redraw_plot()
        """
        for fig in self.fig_list:
            plt.close(fig)
        self.plot()

    def update_plot(self, fig):
        """
        update any parameters that where changed using the built-in draw from
        canvas.

        Use this if you change an of the .fig or axes properties

        :Example: ::

            >>> # to change the grid lines to only be on the major ticks
            >>> import mtpy.modeling.occam2d as occam2d
            >>> dfn = r"/home/occam2d/Inv1/data.dat"
            >>> ocd = occam2d.Occam2DData(dfn)
            >>> ps1 = ocd.plotAllResponses()
            >>> [ax.grid(True, which='major') for ax in [ps1.axrte,ps1.axtep]]
            >>> ps1.update_plot()
        """
        fig.canvas.draw()

    def __str__(self):
        """
        rewrite the string builtin to give a useful message
        """
        return ("Plots depth slices of model from WS3DINV")
#==============================================================================
# plot slices
#==============================================================================
class PlotSlices(object):
# """
# plot all slices and be able to scroll through the model
#
# :Example: ::
#
# >>> import mtpy.modeling.modem as modem
# >>> mfn = r"/home/modem/Inv1/Modular_NLCG_100.rho"
# >>> dfn = r"/home/modem/Inv1/ModEM_data.dat"
# >>> pds = ws.PlotSlices(model_fn=mfn, data_fn=dfn)
#
# ======================= ===================================================
# Buttons Description
# ======================= ===================================================
# 'e' moves n-s slice east by one model block
# 'w' moves n-s slice west by one model block
# 'n' moves e-w slice north by one model block
# 'm' moves e-w slice south by one model block
# 'd' moves depth slice down by one model block
# 'u' moves depth slice up by one model block
# ======================= ===================================================
#
#
# ======================= ===================================================
# Attributes Description
# ======================= ===================================================
# ax_en matplotlib.axes instance for depth slice map view
# ax_ez matplotlib.axes instance for e-w slice
# ax_map matplotlib.axes instance for location map
# ax_nz matplotlib.axes instance for n-s slice
# climits (min , max) color limits on resistivity in log
# scale. *default* is (0, 4)
# cmap name of color map for resisitiviy.
# *default* is 'jet_r'
# data_fn full path to data file name
# dscale scaling parameter depending on map_scale
# east_line_xlist list of line nodes of east grid for faster plotting
# east_line_ylist list of line nodes of east grid for faster plotting
# ew_limits (min, max) limits of e-w in map_scale units
# *default* is None and scales to station area
# fig matplotlib.figure instance for figure
# fig_aspect aspect ratio of plots. *default* is 1
# fig_dpi resolution of figure in dots-per-inch
# *default* is 300
# fig_num figure instance number
# fig_size [width, height] of figure window.
# *default* is [6,6]
# font_dict dictionary of font keywords, internally created
# font_size size of ticklables in points, axes labes are
# font_size+2. *default* is 7
# grid_east relative location of grid nodes in e-w direction
# in map_scale units
# grid_north relative location of grid nodes in n-s direction
# in map_scale units
# grid_z relative location of grid nodes in z direction
# in map_scale units
# index_east index value of grid_east being plotted
# index_north index value of grid_north being plotted
# index_vertical index value of grid_z being plotted
# initial_fn full path to initial file
# key_press matplotlib.canvas.connect instance
# map_scale [ 'm' | 'km' ] scale of map. *default* is km
# mesh_east np.meshgrid(grid_east, grid_north)[0]
# mesh_en_east np.meshgrid(grid_east, grid_north)[0]
# mesh_en_north np.meshgrid(grid_east, grid_north)[1]
# mesh_ez_east np.meshgrid(grid_east, grid_z)[0]
# mesh_ez_vertical np.meshgrid(grid_east, grid_z)[1]
# mesh_north np.meshgrid(grid_east, grid_north)[1]
# mesh_nz_north np.meshgrid(grid_north, grid_z)[0]
# mesh_nz_vertical np.meshgrid(grid_north, grid_z)[1]
# model_fn full path to model file
# ms size of station markers in points. *default* is 2
# nodes_east relative distance betwen nodes in e-w direction
# in map_scale units
# nodes_north relative distance betwen nodes in n-s direction
# in map_scale units
# nodes_z relative distance betwen nodes in z direction
# in map_scale units
# north_line_xlist list of line nodes north grid for faster plotting
# north_line_ylist list of line nodes north grid for faster plotting
# ns_limits (min, max) limits of plots in n-s direction
# *default* is None, set veiwing area to station area
# plot_yn [ 'y' | 'n' ] 'y' to plot on instantiation
# *default* is 'y'
# res_model np.ndarray(n_north, n_east, n_vertical) of
# model resistivity values in linear scale
# station_color color of station marker. *default* is black
# station_dict_east location of stations for each east grid row
# station_dict_north location of stations for each north grid row
# station_east location of stations in east direction
# station_fn full path to station file
# station_font_color color of station label
# station_font_pad padding between station marker and label
# station_font_rotation angle of station label
# station_font_size font size of station label
# station_font_weight weight of font for station label
# station_id [min, max] index values for station labels
# station_marker station marker
# station_names name of stations
# station_north location of stations in north direction
# subplot_bottom distance between axes and bottom of figure window
# subplot_hspace distance between subplots in vertical direction
# subplot_left distance between axes and left of figure window
# subplot_right distance between axes and right of figure window
# subplot_top distance between axes and top of figure window
# subplot_wspace distance between subplots in horizontal direction
# title title of plot
# z_limits (min, max) limits in vertical direction,
# ======================= ===================================================
#
# """
def __init__(self, model_fn, data_fn=None, **kwargs):
self.model_fn = model_fn
self.data_fn = data_fn
self.fig_num = kwargs.pop('fig_num', 1)
self.fig_size = kwargs.pop('fig_size', [6, 6])
self.fig_dpi = kwargs.pop('dpi', 300)
self.fig_aspect = kwargs.pop('fig_aspect', 1)
self.title = kwargs.pop('title', 'on')
self.font_size = kwargs.pop('font_size', 7)
self.subplot_wspace = .20
self.subplot_hspace = .30
self.subplot_right = .98
self.subplot_left = .08
self.subplot_top = .97
self.subplot_bottom = .1
self.index_vertical = kwargs.pop('index_vertical', 0)
self.index_east = kwargs.pop('index_east', 0)
self.index_north = kwargs.pop('index_north', 0)
self.cmap = kwargs.pop('cmap', 'jet_r')
self.climits = kwargs.pop('climits', (0, 4))
self.map_scale = kwargs.pop('map_scale', 'km')
#make map scale
if self.map_scale=='km':
self.dscale=1000.
elif self.map_scale=='m':
self.dscale=1.
self.ew_limits = kwargs.pop('ew_limits', None)
self.ns_limits = kwargs.pop('ns_limits', None)
self.z_limits = kwargs.pop('z_limits', None)
self.res_model = None
self.grid_east = None
self.grid_north = None
self.grid_z = None
self.nodes_east = None
self.nodes_north = None
self.nodes_z = None
self.mesh_east = None
self.mesh_north = None
self.station_east = None
self.station_north = None
self.station_names = None
self.station_id = kwargs.pop('station_id', None)
self.station_font_size = kwargs.pop('station_font_size', 8)
self.station_font_pad = kwargs.pop('station_font_pad', 1.0)
self.station_font_weight = kwargs.pop('station_font_weight', 'bold')
self.station_font_rotation = kwargs.pop('station_font_rotation', 60)
self.station_font_color = kwargs.pop('station_font_color', 'k')
self.station_marker = kwargs.pop('station_marker',
r"$\blacktriangledown$")
self.station_color = kwargs.pop('station_color', 'k')
self.ms = kwargs.pop('ms', 10)
self.plot_yn = kwargs.pop('plot_yn', 'y')
if self.plot_yn == 'y':
self.plot()
def read_files(self):
"""
read in the files to get appropriate information
"""
#--> read in model file
if self.model_fn is not None:
if os.path.isfile(self.model_fn) == True:
md_model = Model()
md_model.read_model_file(self.model_fn)
self.res_model = md_model.res_model
self.grid_east = md_model.grid_east/self.dscale
self.grid_north = md_model.grid_north/self.dscale
self.grid_z = md_model.grid_z/self.dscale
self.nodes_east = md_model.nodes_east/self.dscale
self.nodes_north = md_model.nodes_north/self.dscale
self.nodes_z = md_model.nodes_z/self.dscale
else:
raise mtex.MTpyError_file_handling(
'{0} does not exist, check path'.format(self.model_fn))
#--> read in data file to get station locations
if self.data_fn is not None:
if os.path.isfile(self.data_fn) == True:
md_data = Data()
md_data.read_data_file(self.data_fn)
self.station_east = md_data.station_locations.rel_east/self.dscale
self.station_north = md_data.station_locations.rel_north/self.dscale
self.station_names = md_data.station_locations.station
self.station_elev = md_data.station_locations.elev/self.dscale
else:
print 'Could not find data file {0}'.format(self.data_fn)
def plot(self):
"""
plot:
east vs. vertical,
north vs. vertical,
east vs. north
"""
self.read_files()
self.get_station_grid_locations()
print "=============== ==============================================="
print " Buttons Description "
print "=============== ==============================================="
print " 'e' moves n-s slice east by one model block"
print " 'w' moves n-s slice west by one model block"
print " 'n' moves e-w slice north by one model block"
print " 'm' moves e-w slice south by one model block"
print " 'd' moves depth slice down by one model block"
print " 'u' moves depth slice up by one model block"
print "=============== ==============================================="
self.font_dict = {'size':self.font_size+2, 'weight':'bold'}
#--> set default font size
plt.rcParams['font.size'] = self.font_size
#set the limits of the plot
if self.ew_limits == None:
if self.station_east is not None:
self.ew_limits = (np.floor(self.station_east.min()),
np.ceil(self.station_east.max()))
else:
self.ew_limits = (self.grid_east[5], self.grid_east[-5])
if self.ns_limits == None:
if self.station_north is not None:
self.ns_limits = (np.floor(self.station_north.min()),
np.ceil(self.station_north.max()))
else:
self.ns_limits = (self.grid_north[5], self.grid_north[-5])
if self.z_limits == None:
depth_limit = max([(abs(self.ew_limits[0])+abs(self.ew_limits[1])),
(abs(self.ns_limits[0])+abs(self.ns_limits[1]))])
self.z_limits = (-5000/self.dscale, depth_limit)
self.fig = plt.figure(self.fig_num, figsize=self.fig_size,
dpi=self.fig_dpi)
plt.clf()
gs = gridspec.GridSpec(2, 2,
wspace=self.subplot_wspace,
left=self.subplot_left,
top=self.subplot_top,
bottom=self.subplot_bottom,
right=self.subplot_right,
hspace=self.subplot_hspace)
#make subplots
self.ax_ez = self.fig.add_subplot(gs[0, 0], aspect=self.fig_aspect)
self.ax_nz = self.fig.add_subplot(gs[1, 1], aspect=self.fig_aspect)
self.ax_en = self.fig.add_subplot(gs[1, 0], aspect=self.fig_aspect)
self.ax_map = self.fig.add_subplot(gs[0, 1])
#make grid meshes being sure the indexing is correct
self.mesh_ez_east, self.mesh_ez_vertical = np.meshgrid(self.grid_east,
self.grid_z,
indexing='ij')
self.mesh_nz_north, self.mesh_nz_vertical = np.meshgrid(self.grid_north,
self.grid_z,
indexing='ij')
self.mesh_en_east, self.mesh_en_north = np.meshgrid(self.grid_east,
self.grid_north,
indexing='ij')
#--> plot east vs vertical
self._update_ax_ez()
#--> plot north vs vertical
self._update_ax_nz()
#--> plot east vs north
self._update_ax_en()
#--> plot the grid as a map view
self._update_map()
#plot color bar
cbx = mcb.make_axes(self.ax_map, fraction=.15, shrink=.75, pad = .15)
cb = mcb.ColorbarBase(cbx[0],
cmap=self.cmap,
norm=Normalize(vmin=self.climits[0],
vmax=self.climits[1]))
cb.ax.yaxis.set_label_position('right')
cb.ax.yaxis.set_label_coords(1.25,.5)
cb.ax.yaxis.tick_left()
cb.ax.tick_params(axis='y',direction='in')
cb.set_label('Resistivity ($\Omega \cdot$m)',
fontdict={'size':self.font_size+1})
cb.set_ticks(np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1)))
cblabeldict={-2:'$10^{-3}$',-1:'$10^{-1}$',0:'$10^{0}$',1:'$10^{1}$',
2:'$10^{2}$',3:'$10^{3}$',4:'$10^{4}$',5:'$10^{5}$',
6:'$10^{6}$',7:'$10^{7}$',8:'$10^{8}$'}
cb.set_ticklabels([cblabeldict[cc]
for cc in np.arange(np.ceil(self.climits[0]),
np.floor(self.climits[1]+1))])
plt.show()
self.key_press = self.fig.canvas.mpl_connect('key_press_event',
self.on_key_press)
def on_key_press(self, event):
"""
on a key press change the slices
"""
key_press = event.key
if key_press == 'n':
if self.index_north == self.grid_north.size:
print 'Already at northern most grid cell'
else:
self.index_north += 1
if self.index_north > self.grid_north.size:
self.index_north = self.grid_north.size
self._update_ax_ez()
self._update_map()
if key_press == 'm':
if self.index_north == 0:
print 'Already at southern most grid cell'
else:
self.index_north -= 1
if self.index_north < 0:
self.index_north = 0
self._update_ax_ez()
self._update_map()
if key_press == 'e':
if self.index_east == self.grid_east.size:
print 'Already at eastern most grid cell'
else:
self.index_east += 1
if self.index_east > self.grid_east.size:
self.index_east = self.grid_east.size
self._update_ax_nz()
self._update_map()
if key_press == 'w':
if self.index_east == 0:
print 'Already at western most grid cell'
else:
self.index_east -= 1
if self.index_east < 0:
self.index_east = 0
self._update_ax_nz()
self._update_map()
if key_press == 'd':
if self.index_vertical == self.grid_z.size:
print 'Already at deepest grid cell'
else:
self.index_vertical += 1
if self.index_vertical > self.grid_z.size:
self.index_vertical = self.grid_z.size
self._update_ax_en()
print 'Depth = {0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
if key_press == 'u':
if self.index_vertical == 0:
print 'Already at surface grid cell'
else:
self.index_vertical -= 1
if self.index_vertical < 0:
self.index_vertical = 0
self._update_ax_en()
print 'Depth = {0:.5gf} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale)
def _update_ax_ez(self):
"""
update east vs vertical plot
"""
self.ax_ez.cla()
plot_ez = np.log10(self.res_model[self.index_north, :, :])
self.ax_ez.pcolormesh(self.mesh_ez_east,
self.mesh_ez_vertical,
plot_ez,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sx in self.station_dict_north[self.grid_north[self.index_north]]:
self.ax_ez.text(sx,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_ez.set_xlim(self.ew_limits)
self.ax_ez.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_ez.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_ez.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_nz(self):
"""
update east vs vertical plot
"""
self.ax_nz.cla()
plot_nz = np.log10(self.res_model[:, self.index_east, :])
self.ax_nz.pcolormesh(self.mesh_nz_north,
self.mesh_nz_vertical,
plot_nz,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
#plot stations
for sy in self.station_dict_east[self.grid_east[self.index_east]]:
self.ax_nz.text(sy,
0,
self.station_marker,
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size':self.ms,
'color':self.station_color})
self.ax_nz.set_xlim(self.ns_limits)
self.ax_nz.set_ylim(self.z_limits[1], self.z_limits[0])
self.ax_nz.set_xlabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_nz.set_ylabel('Depth ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.fig.canvas.draw()
self._update_map()
def _update_ax_en(self):
"""
update east vs vertical plot
"""
self.ax_en.cla()
plot_en = np.log10(self.res_model[:, :, self.index_vertical].T)
self.ax_en.pcolormesh(self.mesh_en_east,
self.mesh_en_north,
plot_en,
cmap=self.cmap,
vmin=self.climits[0],
vmax=self.climits[1])
self.ax_en.set_xlim(self.ew_limits)
self.ax_en.set_ylim(self.ns_limits)
self.ax_en.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_en.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#--> plot the stations
if self.station_east is not None:
for ee, nn, elev, name in zip(self.station_east,
self.station_north,
self.station_elev,
self.station_names):
if elev <= self.grid_z[self.index_vertical]:
self.ax_en.text(ee, nn, '+',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':7, 'weight':'bold',
'color':(.75, 0, 0)})
self.ax_en.text(ee, nn, name[2:],
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':7, 'weight':'bold',
'color':(.75, 0, 0)})
self.fig.canvas.draw()
self._update_map()
def _update_map(self):
self.ax_map.cla()
self.east_line_xlist = []
self.east_line_ylist = []
for xx in self.grid_east:
self.east_line_xlist.extend([xx, xx])
self.east_line_xlist.append(None)
self.east_line_ylist.extend([self.grid_north.min(),
self.grid_north.max()])
self.east_line_ylist.append(None)
self.ax_map.plot(self.east_line_xlist,
self.east_line_ylist,
lw=.25,
color='k')
self.north_line_xlist = []
self.north_line_ylist = []
for yy in self.grid_north:
self.north_line_xlist.extend([self.grid_east.min(),
self.grid_east.max()])
self.north_line_xlist.append(None)
self.north_line_ylist.extend([yy, yy])
self.north_line_ylist.append(None)
self.ax_map.plot(self.north_line_xlist,
self.north_line_ylist,
lw=.25,
color='k')
#--> e-w indication line
self.ax_map.plot([self.grid_east.min(),
self.grid_east.max()],
[self.grid_north[self.index_north+1],
self.grid_north[self.index_north+1]],
lw=1,
color='g')
#--> e-w indication line
self.ax_map.plot([self.grid_east[self.index_east+1],
self.grid_east[self.index_east+1]],
[self.grid_north.min(),
self.grid_north.max()],
lw=1,
color='b')
#--> plot the stations
if self.station_east is not None:
for ee, nn in zip(self.station_east, self.station_north):
self.ax_map.text(ee, nn, '*',
verticalalignment='center',
horizontalalignment='center',
fontdict={'size':5, 'weight':'bold'})
self.ax_map.set_xlim(self.ew_limits)
self.ax_map.set_ylim(self.ns_limits)
self.ax_map.set_ylabel('Northing ({0})'.format(self.map_scale),
fontdict=self.font_dict)
self.ax_map.set_xlabel('Easting ({0})'.format(self.map_scale),
fontdict=self.font_dict)
#plot stations
self.ax_map.text(self.ew_limits[0]*.95, self.ns_limits[1]*.95,
'{0:.5g} ({1})'.format(self.grid_z[self.index_vertical],
self.map_scale),
horizontalalignment='left',
verticalalignment='top',
bbox={'facecolor': 'white'},
fontdict=self.font_dict)
self.fig.canvas.draw()
def get_station_grid_locations(self):
"""
get the grid line on which a station resides for plotting
"""
self.station_dict_east = dict([(gx, []) for gx in self.grid_east])
self.station_dict_north = dict([(gy, []) for gy in self.grid_north])
if self.station_east is not None:
for ss, sx in enumerate(self.station_east):
gx = np.where(self.grid_east <= sx)[0][-1]
self.station_dict_east[self.grid_east[gx]].append(self.station_north[ss])
for ss, sy in enumerate(self.station_north):
gy = np.where(self.grid_north <= sy)[0][-1]
self.station_dict_north[self.grid_north[gy]].append(self.station_east[ss])
else:
return
def redraw_plot(self):
"""
redraw plot if parameters were changed
use this function if you updated some attributes and want to re-plot.
:Example: ::
>>> # change the color and marker of the xy components
>>> import mtpy.modeling.occam2d as occam2d
>>> ocd = occam2d.Occam2DData(r"/home/occam2d/Data.dat")
>>> p1 = ocd.plotAllResponses()
>>> #change line width
>>> p1.lw = 2
>>> p1.redraw_plot()
"""
plt.close(self.fig)
self.plot()
def save_figure(self, save_fn=None, fig_dpi=None, file_format='pdf',
                orientation='landscape', close_fig='y'):
    """
    Save the current figure to ``save_fn``.

    Arguments:
    -----------
        **save_fn** : string
                      full path to save figure to, can be input as
                      * directory path -> the file will be saved in that
                        directory as _E{east}_N{north}_Z{vert}.file_format
                      * full path -> file will be saved to the given path;
                        the format is then taken from the last three
                        characters of the path
        **file_format** : [ pdf | eps | jpg | png | svg ]
                          file type of saved figure
        **orientation** : [ landscape | portrait ]
                          orientation in which the file will be saved
        **fig_dpi** : int
                      resolution in dots-per-inch; defaults to the dpi the
                      figure was made with (self.fig_dpi)
        **close_fig** : [ 'y' | 'n' ]
                        'y' closes the plot after saving, 'n' leaves it open
    """
    if fig_dpi is None:  # idiom fix: was '== None'
        fig_dpi = self.fig_dpi

    if not os.path.isdir(save_fn):  # idiom fix: was '== False'
        # save_fn is a full file path; infer the format from the extension.
        # NOTE(review): a save_fn of None still raises here, as before.
        file_format = save_fn[-3:]
        self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                         orientation=orientation, bbox_inches='tight')
    else:
        # save_fn is a directory: build the file name from the slice indices
        save_fn = os.path.join(save_fn, '_E{0}_N{1}_Z{2}.{3}'.format(
            self.index_east, self.index_north,
            self.index_vertical, file_format))
        self.fig.savefig(save_fn, dpi=fig_dpi, format=file_format,
                         orientation=orientation, bbox_inches='tight')

    if close_fig == 'y':
        plt.clf()
        plt.close(self.fig)

    self.fig_fn = save_fn
    # function-call form of print works in both Python 2 and Python 3
    print('Saved figure to: ' + self.fig_fn)
#==============================================================================
# plot rms maps
#==============================================================================
class Plot_RMS_Maps(object):
    """
    Plots the RMS as (data - model)/error in map view for all components
    of the data file.  Gets this information from the .res file output
    by ModEM.

    Arguments:
    ------------------
        **residual_fn** : string
                          full path to .res file

    =================== =======================================================
    Attributes          Description
    =================== =======================================================
    fig                 matplotlib.figure instance for a single plot
    fig_dpi             dots-per-inch resolution of figure *default* is 200
    fig_num             number of fig instance *default* is 1
    fig_size            size of figure in inches [width, height]
                        *default* is [7.75, 6.75]
    font_size           font size of tick labels, axis labels are +2
                        *default* is 8
    marker              marker style for station rms,
                        see matplotlib.line for options,
                        *default* is 's' --> square
    marker_size         size of marker in points. *default* is 10
    pad_x               padding in map units from edge of the axis to stations
                        at the extremities in longitude.
                        *default* is 1/2 tick_locator
    pad_y               padding in map units from edge of the axis to stations
                        at the extremities in latitude.
                        *default* is 1/2 tick_locator
    period_index        index of the period to plot according to
                        self.residual.period_list. *default* is 0
    plot_yn             [ 'y' | 'n' ] default is 'y' to plot on instantiation
    plot_z_list         internal variable for plotting
    residual            modem.Data instance that holds all the information
                        from the residual_fn given
    residual_fn         full path to .res file
    rms_cmap            matplotlib colormap for coloring the markers
    rms_cmap_dict       dictionary of color values for rms_cmap
    rms_max             maximum rms to plot. *default* is 5.0
    rms_min             minimum rms to plot. *default* is 0.0
    save_path           path to save figures to. *default* is directory of
                        residual_fn
    subplot_bottom      spacing from axis to bottom of figure canvas.
                        *default* is .1
    subplot_hspace      horizontal spacing between subplots.
                        *default* is .1
    subplot_left        spacing from axis to left of figure canvas.
                        *default* is .1
    subplot_right       spacing from axis to right of figure canvas.
                        *default* is .9
    subplot_top         spacing from axis to top of figure canvas.
                        *default* is .95
    subplot_vspace      vertical spacing between subplots.
                        *default* is .01
    tick_locator        increment for x and y major ticks. *default* is
                        limits/5
    =================== =======================================================

    =================== =======================================================
    Methods             Description
    =================== =======================================================
    plot                plot rms maps for a single period
    plot_loop           loop over all periods and save figures to save_path
    read_residual_fn    read in residual_fn
    redraw_plot         after updating attributes call redraw_plot to
                        redraw the plot
    save_figure         save the figure to a file
    =================== =======================================================

    :Example: ::

        >>> import mtpy.modeling.modem as modem
        >>> rms_plot = Plot_RMS_Maps(r"/home/ModEM/Inv1/mb_NLCG_030.res")
        >>> # change some attributes
        >>> rms_plot.fig_size = [6, 4]
        >>> rms_plot.rms_max = 3
        >>> rms_plot.redraw_plot()
        >>> # happy with the look now loop over all periods
        >>> rms_plot.plot_loop()
    """

    def __init__(self, residual_fn, **kwargs):
        self.residual_fn = residual_fn
        self.residual = None
        self.save_path = kwargs.pop('save_path',
                                    os.path.dirname(self.residual_fn))

        self.period_index = kwargs.pop('period_index', 0)

        self.subplot_left = kwargs.pop('subplot_left', .1)
        self.subplot_right = kwargs.pop('subplot_right', .9)
        self.subplot_top = kwargs.pop('subplot_top', .95)
        self.subplot_bottom = kwargs.pop('subplot_bottom', .1)
        self.subplot_hspace = kwargs.pop('subplot_hspace', .1)
        self.subplot_vspace = kwargs.pop('subplot_vspace', .01)

        self.font_size = kwargs.pop('font_size', 8)

        self.fig_size = kwargs.pop('fig_size', [7.75, 6.75])
        self.fig_dpi = kwargs.pop('fig_dpi', 200)
        self.fig_num = kwargs.pop('fig_num', 1)
        self.fig = None

        self.marker = kwargs.pop('marker', 's')
        self.marker_size = kwargs.pop('marker_size', 10)

        self.rms_max = kwargs.pop('rms_max', 5)
        self.rms_min = kwargs.pop('rms_min', 0)

        self.tick_locator = kwargs.pop('tick_locator', None)
        self.pad_x = kwargs.pop('pad_x', None)
        self.pad_y = kwargs.pop('pad_y', None)

        self.plot_yn = kwargs.pop('plot_yn', 'y')

        # colormap for rms, goes white to black from 0 to rms max and
        # red below 1 to show where the data is being over fit
        self.rms_cmap_dict = {'red': ((0.0, 1.0, 1.0),
                                      (0.2, 1.0, 1.0),
                                      (1.0, 0.0, 0.0)),
                              'green': ((0.0, 0.0, 0.0),
                                        (0.2, 1.0, 1.0),
                                        (1.0, 0.0, 0.0)),
                              'blue': ((0.0, 0.0, 0.0),
                                       (0.2, 1.0, 1.0),
                                       (1.0, 0.0, 0.0))}

        self.rms_cmap = colors.LinearSegmentedColormap('rms_cmap',
                                                       self.rms_cmap_dict,
                                                       256)

        # label, (row, col) index into the impedance/tipper arrays, and the
        # subplot number for each component plotted
        self.plot_z_list = [{'label': r'$Z_{xx}$', 'index': (0, 0), 'plot_num': 1},
                            {'label': r'$Z_{xy}$', 'index': (0, 1), 'plot_num': 2},
                            {'label': r'$Z_{yx}$', 'index': (1, 0), 'plot_num': 3},
                            {'label': r'$Z_{yy}$', 'index': (1, 1), 'plot_num': 4},
                            {'label': r'$T_{x}$', 'index': (0, 0), 'plot_num': 5},
                            {'label': r'$T_{y}$', 'index': (0, 1), 'plot_num': 6}]

        if self.plot_yn == 'y':
            self.plot()

    def read_residual_fn(self):
        """Read residual_fn into self.residual (no-op if already read)."""
        if self.residual is None:
            self.residual = Data()
            self.residual.read_data_file(self.residual_fn)

    def plot(self):
        """
        plot rms in map view
        """
        self.read_residual_fn()

        font_dict = {'size': self.font_size + 2, 'weight': 'bold'}
        rms_1 = 1. / self.rms_max

        if self.tick_locator is None:
            x_locator = np.round((self.residual.data_array['lon'].max() -
                                  self.residual.data_array['lon'].min()) / 5, 2)
            y_locator = np.round((self.residual.data_array['lat'].max() -
                                  self.residual.data_array['lat'].min()) / 5, 2)
            # BUG FIX: use the larger spacing; max() also covers the equal
            # case, which previously left tick_locator as None and crashed
            # later in MultipleLocator(None)
            self.tick_locator = max(x_locator, y_locator)

        if self.pad_x is None:
            self.pad_x = self.tick_locator / 2
        if self.pad_y is None:
            self.pad_y = self.tick_locator / 2

        plt.rcParams['font.size'] = self.font_size
        plt.rcParams['figure.subplot.left'] = self.subplot_left
        plt.rcParams['figure.subplot.right'] = self.subplot_right
        plt.rcParams['figure.subplot.bottom'] = self.subplot_bottom
        plt.rcParams['figure.subplot.top'] = self.subplot_top
        # this class names the horizontal spacing 'hspace', which maps onto
        # matplotlib's 'wspace' (and vspace -> matplotlib 'hspace')
        plt.rcParams['figure.subplot.wspace'] = self.subplot_hspace
        plt.rcParams['figure.subplot.hspace'] = self.subplot_vspace

        self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi)

        for p_dict in self.plot_z_list:
            ax = self.fig.add_subplot(3, 2, p_dict['plot_num'], aspect='equal')

            ii = p_dict['index'][0]
            # BUG FIX: the column index was also read from index[0], so the
            # off-diagonal components (xy, yx, yy, Ty) plotted the wrong
            # array element
            jj = p_dict['index'][1]

            for r_arr in self.residual.data_array:
                # calculate the rms = |residual|/error for this component
                if p_dict['plot_num'] < 5:
                    rms = abs(r_arr['z'][self.period_index, ii, jj]) / \
                        (r_arr['z_err'][self.period_index, ii, jj].real)
                else:
                    rms = abs(r_arr['tip'][self.period_index, ii, jj]) / \
                        (r_arr['tip_err'][self.period_index, ii, jj].real)

                # color appropriately; a NaN rms fails every comparison
                # below and therefore keeps the tiny white marker set here
                if np.nan_to_num(rms) == 0.0:
                    marker_color = (1, 1, 1)
                    marker = '.'
                    marker_size = .1
                    marker_edge_color = (1, 1, 1)
                if rms > self.rms_max:
                    marker_color = (0, 0, 0)
                    marker = self.marker
                    marker_size = self.marker_size
                    marker_edge_color = (0, 0, 0)
                elif 1 <= rms <= self.rms_max:
                    r_color = 1 - rms / self.rms_max + rms_1
                    marker_color = (r_color, r_color, r_color)
                    marker = self.marker
                    marker_size = self.marker_size
                    marker_edge_color = (0, 0, 0)
                elif rms < 1:
                    r_color = 1 - rms / self.rms_max
                    marker_color = (1, r_color, r_color)
                    marker = self.marker
                    marker_size = self.marker_size
                    marker_edge_color = (0, 0, 0)

                ax.plot(r_arr['lon'], r_arr['lat'],
                        marker=marker,
                        ms=marker_size,
                        mec=marker_edge_color,
                        mfc=marker_color,
                        zorder=3)

            # axis labels only on the outer edges of the 3x2 grid
            if p_dict['plot_num'] == 1 or p_dict['plot_num'] == 3:
                ax.set_ylabel('Latitude (deg)', fontdict=font_dict)
                plt.setp(ax.get_xticklabels(), visible=False)
            elif p_dict['plot_num'] == 2 or p_dict['plot_num'] == 4:
                plt.setp(ax.get_xticklabels(), visible=False)
                plt.setp(ax.get_yticklabels(), visible=False)
            elif p_dict['plot_num'] == 6:
                plt.setp(ax.get_yticklabels(), visible=False)
                ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
            else:
                ax.set_xlabel('Longitude (deg)', fontdict=font_dict)
                ax.set_ylabel('Latitude (deg)', fontdict=font_dict)

            # component label in the upper-left corner of each panel
            ax.text(self.residual.data_array['lon'].min() + .005 - self.pad_x,
                    self.residual.data_array['lat'].max() - .005 + self.pad_y,
                    p_dict['label'],
                    verticalalignment='top',
                    horizontalalignment='left',
                    bbox={'facecolor': 'white'},
                    zorder=3)

            ax.tick_params(direction='out')
            ax.grid(zorder=0, color=(.75, .75, .75))

            ax.set_xlim(self.residual.data_array['lon'].min() - self.pad_x,
                        self.residual.data_array['lon'].max() + self.pad_x)
            ax.set_ylim(self.residual.data_array['lat'].min() - self.pad_y,
                        self.residual.data_array['lat'].max() + self.pad_y)

            ax.xaxis.set_major_locator(MultipleLocator(self.tick_locator))
            ax.yaxis.set_major_locator(MultipleLocator(self.tick_locator))
            ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
            ax.yaxis.set_major_formatter(FormatStrFormatter('%2.2f'))

        # single colorbar shared by all six panels
        cb_ax = self.fig.add_axes([self.subplot_right + .02, .225, .02, .45])
        color_bar = mcb.ColorbarBase(cb_ax,
                                     cmap=self.rms_cmap,
                                     norm=colors.Normalize(vmin=self.rms_min,
                                                           vmax=self.rms_max),
                                     orientation='vertical')
        color_bar.set_label('RMS', fontdict=font_dict)

        self.fig.suptitle(
            'period = {0:.5g} (s)'.format(
                self.residual.period_list[self.period_index]),
            fontdict={'size': self.font_size + 3, 'weight': 'bold'})
        plt.show()

    def redraw_plot(self):
        """Close all open figures and re-plot with the current attributes."""
        plt.close('all')
        self.plot()

    def save_figure(self, save_path=None, save_fn_basename=None,
                    save_fig_dpi=None, fig_format='.png', fig_close=True):
        """
        Save the current figure.

        Arguments:
        -----------
            **save_path** : string, directory to save into; defaults to
                            self.save_path
            **save_fn_basename** : string, file name; defaults to
                                   '<period_index>_RMS_<period>_s.<fmt>'
            **save_fig_dpi** : int, resolution; also updates self.fig_dpi
            **fig_format** : string, file extension (with or without a
                             leading dot)
            **fig_close** : bool, close all figures after saving
        """
        if save_path is not None:
            self.save_path = save_path

        if save_fn_basename is None:
            # BUG FIX: strip a leading '.' from fig_format so the default
            # '.png' no longer produces a double-dot file name ('..png')
            save_fn_basename = '{0:02}_RMS_{1:.5g}_s.{2}'.format(
                self.period_index,
                self.residual.period_list[self.period_index],
                fig_format.lstrip('.'))
        save_fn = os.path.join(self.save_path, save_fn_basename)

        if save_fig_dpi is not None:
            self.fig_dpi = save_fig_dpi

        self.fig.savefig(save_fn, dpi=self.fig_dpi)
        # function-call form of print works in both Python 2 and Python 3
        print('saved file to {0}'.format(save_fn))

        if fig_close == True:
            plt.close('all')

    def plot_loop(self, fig_format='png'):
        """
        Plot and save a figure for every period in the residual file.
        """
        self.read_residual_fn()

        for f_index in range(self.residual.period_list.size):
            self.period_index = f_index
            self.plot()
            self.save_figure(fig_format=fig_format)
#==============================================================================
# Exceptions
#==============================================================================
class ModEMError(Exception):
    """Base exception raised for errors in the ModEM module."""
    pass
|
MTgeophysics/mtpy
|
legacy/modem.py
|
Python
|
gpl-3.0
| 358,776
|
[
"ParaView",
"VTK"
] |
4ef61cf6aef154a575c4484788c731968c1f8114ef0d67f8a5db08fdc3907fce
|
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates embedding using a TF-Hub module."""
import os
import pickle
import apache_beam as beam
from apache_beam.transforms import util
# TODO(b/176884057): Find a supported alternative to gaussian_random_matrix,
# which became private in scikit-learn 0.24 and is likely to break.
# pylint: disable=g-import-not-at-top
try:
from sklearn.random_projection import gaussian_random_matrix
except ImportError:
from sklearn.random_projection import _gaussian_random_matrix as gaussian_random_matrix
import tensorflow as tf
import tensorflow_hub as hub
# pylint: enable=g-import-not-at-top
_RUNNER = 'DirectRunner'
_RANDOM_PROJECTION_FILENAME = 'random_projection.matrix'
_BATCH_SIZE = 1028
embed_fn = None
def generate_embeddings(items, module_url, random_projection_matrix=None):
    """Generates embeddings for a batch of items using a TF-Hub module.

    Args:
        items: The items to generate embeddings for.
        module_url: The TF-Hub module url.
        random_projection_matrix: A numpy array of the random projection
            weights, or None for no projection.

    Returns:
        A (items, embeddings) tuple.
    """
    global embed_fn
    # Lazily load the module once and reuse it across calls.
    if embed_fn is None:
        embed_fn = hub.load(module_url)

    vectors = embed_fn(items).numpy()
    if random_projection_matrix is not None:
        # Project the embeddings down to the reduced dimensionality.
        vectors = vectors.dot(random_projection_matrix)
    return items, vectors
def to_tf_example(entries):
    """Serializes a batch of (items, embeddings) into tf.train.Examples.

    Args:
        entries: A (item_list, embedding_list) tuple as produced by
            generate_embeddings; the two lists are parallel.

    Returns:
        A list of serialized tf.train.Example byte strings, one per item.
    """
    item_list, embedding_list = entries
    examples = []
    # iterate the parallel lists in lockstep instead of indexing by range()
    for item, embedding in zip(item_list, embedding_list):
        features = {
            'item':
                tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[item.encode('utf-8')])),
            'embedding':
                tf.train.Feature(
                    float_list=tf.train.FloatList(value=embedding.tolist()))
        }
        # deterministic serialization keeps the output stable across runs
        example = tf.train.Example(features=tf.train.Features(
            feature=features)).SerializeToString(deterministic=True)
        examples.append(example)
    return examples
def generate_random_projection_weights(original_dim, projected_dim, output_dir):
    """Generates a Gaussian random projection weights matrix.

    The matrix is only created when a target dimensionality is given and it
    actually reduces the dimensionality; it is then pickled to output_dir so
    it can be reused at query time.

    Args:
        original_dim: Dimensionality of the module's embeddings.
        projected_dim: Target dimensionality; falsy values disable projection.
        output_dir: Directory the matrix pickle is written to.

    Returns:
        The (original_dim, projected_dim) projection matrix, or None when no
        projection is performed.
    """
    random_projection_matrix = None
    if projected_dim and original_dim > projected_dim:
        random_projection_matrix = gaussian_random_matrix(
            n_components=projected_dim, n_features=original_dim).T
        # message fix: 'was creates' -> 'was created'
        print('A Gaussian random weight matrix was created with shape of {}'.format(
            random_projection_matrix.shape))
        print('Storing random projection matrix to disk...')
        output_file_path = os.path.join(output_dir, _RANDOM_PROJECTION_FILENAME)
        with open(output_file_path, 'wb') as handle:
            pickle.dump(
                random_projection_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
        print('Random projection matrix saved to disk.')
    return random_projection_matrix
def run(args):
    """Runs the embedding generation Beam pipeline.

    Clears and recreates args.embed_output_dir, probes the TF-Hub module for
    its native embedding dimensionality, optionally creates a random
    projection matrix, then reads sentences, embeds them in batches and
    writes serialized tf.Examples to TFRecord files.

    Args:
        args: Parsed command-line namespace providing embed_output_dir,
            module_url, projected_dim, data_file_pattern and any Beam
            pipeline options.
    """
    if tf.io.gfile.exists(args.embed_output_dir):
        print('Removing embedding output directory...')
        tf.io.gfile.rmtree(args.embed_output_dir)
    print('Creating empty output directory...')
    tf.io.gfile.makedirs(args.embed_output_dir)

    options = beam.options.pipeline_options.PipelineOptions(**vars(args))

    # probe the module once to find the native embedding dimensionality
    original_dim = hub.load(args.module_url)(['']).shape[1]
    random_projection_matrix = generate_random_projection_weights(
        original_dim, args.projected_dim, args.embed_output_dir)

    print('Starting the Beam pipeline...')
    with beam.Pipeline(runner=_RUNNER, options=options) as pipeline:
        _ = (
            pipeline
            | 'Read sentences from files' >>
            beam.io.ReadFromText(file_pattern=args.data_file_pattern)
            # BUG FIX: use integer division -- '/' yields a float under
            # Python 3 true division, but batch sizes must be integers
            | 'Batch elements' >> util.BatchElements(
                min_batch_size=_BATCH_SIZE // 2, max_batch_size=_BATCH_SIZE)
            | 'Generate embeddings' >> beam.Map(
                generate_embeddings, args.module_url, random_projection_matrix)
            | 'Encode to tf example' >> beam.FlatMap(to_tf_example)
            | 'Write to TFRecords files' >> beam.io.WriteToTFRecord(
                file_path_prefix='{}/emb'.format(args.embed_output_dir),
                file_name_suffix='.tfrecords')
        )
    print('Beam pipeline completed.')
|
tensorflow/hub
|
tensorflow_hub/tools/make_nearest_neighbour_index/embedding_generator.py
|
Python
|
apache-2.0
| 4,754
|
[
"Gaussian"
] |
a486fa28d49a3a742bcf3a6c7783e627ffc0f2557a9307bcbee3b45947641e77
|
import os
path = os.path.join(os.path.expanduser('~'), '.noma/')
# Load the saved preferences: one setting per line.  The context manager
# guarantees the handle is closed, and the [:-1] slice drops the last
# character of every line (exactly what the original while-loop did,
# including on a final line without a trailing newline).
with open(path + 'savefile.txt', 'r') as fin:
    savedfile = [line[:-1] for line in fin.readlines()]
from Tkinter import *
# Main preferences window.
root = Tk()
root.title("Preferences")
####
# Page-switching callbacks for the top navigation bar.  `event` has a
# default so the same handler works for Button commands and key bindings.
# NOTE(review): firstpage()..fourthpage() are not defined in this part of
# the file; presumably defined further down -- confirm.
def parabutton(event=None):
    firstpage()
def filenamebutton(event=None):
    secondpage()
def controlsbutton(event=None):
    thirdpage()
def miscbutton(event=None):
    fourthpage()
# Tk variable holding the currently selected page.
pagenumber=StringVar()
# Top row of navigation buttons, one per preferences page.
toppage1=Frame(root)
toppage1.pack(side='top')
parabutton1=Button(toppage1,text='Parameters',command=parabutton)
parabutton1.pack(side='left')
filenamebutton1=Button(toppage1,text='Save names',command=filenamebutton)
filenamebutton1.pack(side='left')
controlsbutton1=Button(toppage1,text='Controls',command=controlsbutton)
controlsbutton1.pack(side='left')
miscbutton1=Button(toppage1,text='Misc.',command=miscbutton)
miscbutton1.pack(side='left')
####
########################### Parameters page First page
# Each setting below follows the same pattern: a StringVar initialized from
# a fixed index of `savedfile`, an Entry (or Radiobutton pair) bound to it,
# and a Label describing the value.
firstpage1=Frame(root)
Label(firstpage1,text='Start up',font='Times 14 underline').grid(row=0,column=1)
nummode=StringVar()
nummode.set(savedfile[0])
Entry(firstpage1,width=2,textvariable=nummode).grid(row=1,column=1)
Label(firstpage1,text='Number of modes').grid(row=1,column=2)
# Collectivity: radio pair, 0 = with mass, 1 = without mass.
Label(firstpage1,text='Collectivity',font='Times 14 underline').grid(row=2,column=1)
massnomass=StringVar()
massnomass.set(savedfile[7])
Radiobutton(firstpage1,value=0,variable=massnomass).grid(row=3,column=1)
Label(firstpage1,text='with mass').grid(row=3,column=2)
Radiobutton(firstpage1,value=1,variable=massnomass).grid(row=4,column=1)
Label(firstpage1,text='without mass').grid(row=4,column=2)
# Sample Modes defaults: modes, n_confs, rmsd.
Label(firstpage1,text='Sample Modes',font='Times 14 underline').grid(row=0,column=3)
modeens=StringVar()
modeens.set(savedfile[1])
Entry(firstpage1,width=4,textvariable=modeens).grid(row=1,column=3)
Label(firstpage1,text='modes').grid(row=1,column=4)
confens=StringVar()
confens.set(savedfile[2])
Entry(firstpage1,width=2,textvariable=confens).grid(row=2,column=3)
Label(firstpage1,text='n_confs').grid(row=2,column=4)
rmsdens=StringVar()
rmsdens.set(savedfile[3])
Entry(firstpage1,width=3,textvariable=rmsdens).grid(row=3,column=3)
Label(firstpage1,text='rmsd').grid(row=3,column=4)
# Traverse Mode defaults: mode, n_steps, rmsd.
Label(firstpage1,text='Traverse Mode',font='Times 14 underline').grid(row=0,column=6)
modetra=StringVar()
modetra.set(savedfile[4])
Entry(firstpage1,width=2,textvariable=modetra).grid(row=1,column=6)
Label(firstpage1,text='mode').grid(row=1,column=7)
steptra=StringVar()
steptra.set(savedfile[5])
Entry(firstpage1,width=3,textvariable=steptra).grid(row=2,column=6)
Label(firstpage1,text='n_steps').grid(row=2,column=7)
rmsdtra=StringVar()
rmsdtra.set(savedfile[6])
Entry(firstpage1,width=3,textvariable=rmsdtra).grid(row=3,column=6)
Label(firstpage1,text='rmsd').grid(row=3,column=7)
# Gamma cutoff: radio choice between a distance-power form (0) and a
# constant gamma (1), each with its own cutoff distance in Angstroms.
Label(firstpage1,text='Gamma\nCutoff',font='Times 14 underline').grid(row=0,column=9,columnspan=2)
gammacutoff=StringVar()
gammacutoff.set(savedfile[9])
Radiobutton(firstpage1,value=0,variable=gammacutoff).grid(row=1,column=8)
Label(firstpage1,text='dist^').grid(row=1,column=9)
gammadistdep=StringVar()
gammadistdep.set(savedfile[91])
Entry(firstpage1,width=3,textvariable=gammadistdep).grid(row=1,column=10)
cutoff1=StringVar()
cutoff1.set(savedfile[10])
Entry(firstpage1,width=5,textvariable=cutoff1).grid(row=1,column=11)
Label(firstpage1,text=u'\u00C5').grid(row=1,column=12)
Radiobutton(firstpage1,value=1,variable=gammacutoff).grid(row=2,column=8)
gamma2=StringVar()
gamma2.set(savedfile[11])
Entry(firstpage1,width=3,textvariable=gamma2).grid(row=2,column=9)
cutoff2=StringVar()
cutoff2.set(savedfile[12])
Entry(firstpage1,width=5,textvariable=cutoff2).grid(row=2,column=11)
Label(firstpage1,text=u'\u00C5').grid(row=2,column=12)
Label(firstpage1,text='Model\nProtofibril',font='Times 14 underline').grid(row=3,column=8)
coordset=StringVar()
coordset.set(savedfile[14])
Entry(firstpage1,width=2,textvariable=coordset).grid(row=4,column=8)
Label(firstpage1,text='Coordinate\nset').grid(row=4,column=9)
########################### File name page Second page
# Left column: output folder names (savedfile[41..58]); right columns:
# output file base names and extensions (savedfile[59..90]).  The trailing
# '#2' / '#1' markers count how many editable fields each row has.
secondpage1=Frame(root)
Label(secondpage1,text='Folder Names',font='Times 14 underline').grid(row=0,column=0)
Label(secondpage1,text='Cannot be left blank').grid(row=1,column=0)
folder1=StringVar()
folder1.set(savedfile[41])
Entry(secondpage1,width=13,textvariable=folder1).grid(row=2,column=0)
Label(secondpage1,text='C-alpha ANM').grid(row=2,column=1)
folder2=StringVar()
folder2.set(savedfile[42])
Entry(secondpage1,width=13,textvariable=folder2).grid(row=3,column=0)
Label(secondpage1,text='C-alpha GNM').grid(row=3,column=1)
folder3=StringVar()
folder3.set(savedfile[43])
Entry(secondpage1,width=13,textvariable=folder3).grid(row=4,column=0)
Label(secondpage1,text='Heavy ANM').grid(row=4,column=1)
folder4=StringVar()
folder4.set(savedfile[44])
Entry(secondpage1,width=13,textvariable=folder4).grid(row=5,column=0)
Label(secondpage1,text='Heavy GNM').grid(row=5,column=1)
folder5=StringVar()
folder5.set(savedfile[45])
Entry(secondpage1,width=13,textvariable=folder5).grid(row=6,column=0)
Label(secondpage1,text='All ANM').grid(row=6,column=1)
folder6=StringVar()
folder6.set(savedfile[46])
Entry(secondpage1,width=13,textvariable=folder6).grid(row=7,column=0)
Label(secondpage1,text='All GNM').grid(row=7,column=1)
folder7=StringVar()
folder7.set(savedfile[47])
Entry(secondpage1,width=13,textvariable=folder7).grid(row=8,column=0)
Label(secondpage1,text='Backbone ANM').grid(row=8,column=1)
folder8=StringVar()
folder8.set(savedfile[48])
Entry(secondpage1,width=13,textvariable=folder8).grid(row=9,column=0)
Label(secondpage1,text='Backbone GNM').grid(row=9,column=1)
folder9=StringVar()
folder9.set(savedfile[49])
Entry(secondpage1,width=13,textvariable=folder9).grid(row=10,column=0)
Label(secondpage1,text='Sidechain ANM').grid(row=10,column=1)
folder10=StringVar()
folder10.set(savedfile[50])
Entry(secondpage1,width=13,textvariable=folder10).grid(row=11,column=0)
Label(secondpage1,text='Sidechain GNM').grid(row=11,column=1)
folder11=StringVar()
folder11.set(savedfile[51])
Entry(secondpage1,width=13,textvariable=folder11).grid(row=12,column=0)
Label(secondpage1,text='Correlation').grid(row=12,column=1)
folder12=StringVar()
folder12.set(savedfile[52])
Entry(secondpage1,width=13,textvariable=folder12).grid(row=13,column=0)
Label(secondpage1,text='SqFlucts').grid(row=13,column=1)
folder13=StringVar()
folder13.set(savedfile[53])
Entry(secondpage1,width=13,textvariable=folder13).grid(row=14,column=0)
Label(secondpage1,text='Overlap').grid(row=14,column=1)
folder14=StringVar()
folder14.set(savedfile[54])
Entry(secondpage1,width=13,textvariable=folder14).grid(row=15,column=0)
Label(secondpage1,text='Modes').grid(row=15,column=1)
folder15=StringVar()
folder15.set(savedfile[55])
Entry(secondpage1,width=13,textvariable=folder15).grid(row=16,column=0)
Label(secondpage1,text='Collectivity').grid(row=16,column=1)
folder16=StringVar()
folder16.set(savedfile[56])
Entry(secondpage1,width=13,textvariable=folder16).grid(row=17,column=0)
Label(secondpage1,text='TempFactors').grid(row=17,column=1)
folder17=StringVar()
folder17.set(savedfile[57])
Entry(secondpage1,width=13,textvariable=folder17).grid(row=18,column=0)
Label(secondpage1,text='PhiPsi').grid(row=18,column=1)
folder18=StringVar()
folder18.set(savedfile[58])
Entry(secondpage1,width=13,textvariable=folder18).grid(row=19,column=0)
Label(secondpage1,text='NMD').grid(row=19,column=1)
# File names: each row is a base-name Entry, a literal '.' Label, and
# either an editable extension Entry or a fixed-extension Label.
Label(secondpage1,text='File Names',font='Times 14 underline').grid(row=0,column=5)
Label(secondpage1,text='Correlations Data').grid(row=2,column=4)
file1=StringVar()
file1.set(savedfile[59])
Entry(secondpage1,width=13,textvariable=file1).grid(row=2,column=5)
Label(secondpage1,text='.').grid(row=2,column=6)
file2=StringVar()
file2.set(savedfile[60])
Entry(secondpage1,width=3,textvariable=file2).grid(row=2,column=7)
#2
Label(secondpage1,text='Correlations Plot').grid(row=3,column=4)
file3=StringVar()
file3.set(savedfile[61])
Entry(secondpage1,width=13,textvariable=file3).grid(row=3,column=5)
Label(secondpage1,text='.').grid(row=3,column=6)
file4=StringVar()
file4.set(savedfile[62])
Entry(secondpage1,width=3,textvariable=file4).grid(row=3,column=7)
#2
Label(secondpage1,text='SqFlucts Data').grid(row=4,column=4)
file5=StringVar()
file5.set(savedfile[63])
Entry(secondpage1,width=13,textvariable=file5).grid(row=4,column=5)
Label(secondpage1,text='.').grid(row=4,column=6)
file6=StringVar()
file6.set(savedfile[64])
Entry(secondpage1,width=3,textvariable=file6).grid(row=4,column=7)
#2
Label(secondpage1,text='SqFlucts Plot').grid(row=5,column=4)
file7=StringVar()
file7.set(savedfile[65])
Entry(secondpage1,width=13,textvariable=file7).grid(row=5,column=5)
Label(secondpage1,text='.').grid(row=5,column=6)
file8=StringVar()
file8.set(savedfile[66])
Entry(secondpage1,width=3,textvariable=file8).grid(row=5,column=7)
#2
Label(secondpage1,text='Overlap Data').grid(row=6,column=4)
file9=StringVar()
file9.set(savedfile[67])
Entry(secondpage1,width=13,textvariable=file9).grid(row=6,column=5)
Label(secondpage1,text='.').grid(row=6,column=6)
file10=StringVar()
file10.set(savedfile[68])
Entry(secondpage1,width=3,textvariable=file10).grid(row=6,column=7)
#2
Label(secondpage1,text='Overlap Plot').grid(row=7,column=4)
file11=StringVar()
file11.set(savedfile[69])
Entry(secondpage1,width=13,textvariable=file11).grid(row=7,column=5)
Label(secondpage1,text='.').grid(row=7,column=6)
file12=StringVar()
file12.set(savedfile[70])
Entry(secondpage1,width=3,textvariable=file12).grid(row=7,column=7)
#2
Label(secondpage1,text='Overlap Table Data').grid(row=8,column=4)
file13=StringVar()
file13.set(savedfile[71])
Entry(secondpage1,width=13,textvariable=file13).grid(row=8,column=5)
Label(secondpage1,text='.').grid(row=8,column=6)
file14=StringVar()
file14.set(savedfile[72])
Entry(secondpage1,width=3,textvariable=file14).grid(row=8,column=7)
#2
Label(secondpage1,text='Overlap Table Plot').grid(row=9,column=4)
file15=StringVar()
file15.set(savedfile[73])
Entry(secondpage1,width=13,textvariable=file15).grid(row=9,column=5)
Label(secondpage1,text='.').grid(row=9,column=6)
file16=StringVar()
file16.set(savedfile[74])
Entry(secondpage1,width=3,textvariable=file16).grid(row=9,column=7)
#2
Label(secondpage1,text='Modes Data').grid(row=10,column=4)
file17=StringVar()
file17.set(savedfile[75])
Entry(secondpage1,width=13,textvariable=file17).grid(row=10,column=5)
Label(secondpage1,text='.').grid(row=10,column=6)
file18=StringVar()
file18.set(savedfile[76])
Entry(secondpage1,width=3,textvariable=file18).grid(row=10,column=7)
#2
Label(secondpage1,text='Modes Plot').grid(row=11,column=4)
file19=StringVar()
file19.set(savedfile[77])
Entry(secondpage1,width=13,textvariable=file19).grid(row=11,column=5)
Label(secondpage1,text='.').grid(row=11,column=6)
file20=StringVar()
file20.set(savedfile[78])
Entry(secondpage1,width=3,textvariable=file20).grid(row=11,column=7)
#2
Label(secondpage1,text='Collectivity').grid(row=12,column=4)
file21=StringVar()
file21.set(savedfile[79])
Entry(secondpage1,width=13,textvariable=file21).grid(row=12,column=5)
Label(secondpage1,text='.').grid(row=12,column=6)
file22=StringVar()
file22.set(savedfile[80])
Entry(secondpage1,width=3,textvariable=file22).grid(row=12,column=7)
#2
Label(secondpage1,text='Temp Factors').grid(row=13,column=4)
file23=StringVar()
file23.set(savedfile[81])
Entry(secondpage1,width=13,textvariable=file23).grid(row=13,column=5)
Label(secondpage1,text='.').grid(row=13,column=6)
file24=StringVar()
file24.set(savedfile[82])
Entry(secondpage1,width=3,textvariable=file24).grid(row=13,column=7)
#2
Label(secondpage1,text='Phi & Psi Data').grid(row=14,column=4)
file25=StringVar()
file25.set(savedfile[83])
Entry(secondpage1,width=13,textvariable=file25).grid(row=14,column=5)
Label(secondpage1,text='.').grid(row=14,column=6)
file26=StringVar()
file26.set(savedfile[84])
Entry(secondpage1,width=3,textvariable=file26).grid(row=14,column=7)
#2
# Rows below have fixed extensions (pdb/nmd/npz), so only the base name
# is editable.
Label(secondpage1,text='Sample').grid(row=15,column=4)
file27=StringVar()
file27.set(savedfile[85])
Entry(secondpage1,width=13,textvariable=file27).grid(row=15,column=5)
Label(secondpage1,text='.').grid(row=15,column=6)
Label(secondpage1,text='pdb').grid(row=15,column=7)
#1
Label(secondpage1,text='Traverse').grid(row=16,column=4)
file28=StringVar()
file28.set(savedfile[86])
Entry(secondpage1,width=13,textvariable=file28).grid(row=16,column=5)
Label(secondpage1,text='.').grid(row=16,column=6)
Label(secondpage1,text='pdb').grid(row=16,column=7)
#1
Label(secondpage1,text='NMD').grid(row=17,column=4)
file29=StringVar()
file29.set(savedfile[87])
Entry(secondpage1,width=13,textvariable=file29).grid(row=17,column=5)
Label(secondpage1,text='.').grid(row=17,column=6)
Label(secondpage1,text='nmd').grid(row=17,column=7)
#1
Label(secondpage1,text='Model').grid(row=18,column=4)
file30=StringVar()
file30.set(savedfile[88])
Entry(secondpage1,width=13,textvariable=file30).grid(row=18,column=5)
Label(secondpage1,text='.').grid(row=18,column=6)
Label(secondpage1,text='npz').grid(row=18,column=7)
#1
Label(secondpage1,text='Fraction of Variance').grid(row=19,column=4)
file31=StringVar()
file31.set(savedfile[89])
Entry(secondpage1,width=13,textvariable=file31).grid(row=19,column=5)
Label(secondpage1,text='.').grid(row=19,column=6)
file32=StringVar()
file32.set(savedfile[90])
Entry(secondpage1,width=3,textvariable=file32).grid(row=19,column=7)
#2
########################### Controls page Third page
# Keyboard-shortcut settings page: one Entry per bindable action, pre-filled
# from savedfile[16..40]. Each StringVar is published as the module global
# button1..button25 because save()/default() reference those names directly.
thirdpage1=Frame(root)
_shortcut_labels=['Browsing','Submitting','Execute notes','Saving and switching',
    'Quit','Open','Xmgrace','VMD','Preferences','About','Manual','Calculator',
    'Find comparison','Next file','Previous file','Up NPZ Option','Down NPZ Option',
    'Right NPZ Option','Left NPZ Option','Add to modes','Subtract from modes',
    'Right on main tab','Left on main tab','Stat. PDB','Stat. NPZ']
# Grid row 1 is intentionally skipped: the first entry sits on row 0 and the
# second on row 2, exactly as in the original layout.
for _i,_text in enumerate(_shortcut_labels):
    _var=StringVar()
    _var.set(savedfile[16+_i])
    globals()['button%d'%(_i+1)]=_var
    _row=_i if _i==0 else _i+1
    Entry(thirdpage1,width=13,textvariable=_var).grid(row=_row,column=0)
    Label(thirdpage1,text=_text).grid(row=_row,column=1)
############################# MISC PAGE
# Miscellaneous yes/no preferences, each backed by a StringVar holding '0'/'1'.
fourthpage1=Frame(root)
# Which UI generation to start with (savedfile[8]): 0 = New, 1 = Old.
Label(fourthpage1,text='User Interface',font='Times 14 underline').grid(row=5,column=1)
startpage=StringVar()
startpage.set(savedfile[8])
Radiobutton(fourthpage1,value=0,variable=startpage).grid(row=6,column=1)
Label(fourthpage1,text='New').grid(row=6,column=2)
Radiobutton(fourthpage1,value=1,variable=startpage).grid(row=7,column=1)
Label(fourthpage1,text='Old').grid(row=7,column=2)
# Automatic output naming toggle (savedfile[15]): 1 = Yes, 0 = No.
Label(fourthpage1,text='Auto-name',font='Times 14 underline').grid(row=5,column=5)
autoname=StringVar()
autoname.set(savedfile[15])
Radiobutton(fourthpage1,value=1,variable=autoname).grid(row=6,column=5)
Label(fourthpage1,text='Yes').grid(row=6,column=6)
Radiobutton(fourthpage1,value=0,variable=autoname).grid(row=7,column=5)
Label(fourthpage1,text='No').grid(row=7,column=6)
# Whether results are displayed after a run (savedfile[13]): 1 = Yes, 0 = No.
Label(fourthpage1,text='Show Results',font='Times 14 underline').grid(row=5,column=8)
showresults=StringVar()
showresults.set(savedfile[13])
Radiobutton(fourthpage1,value=1,variable=showresults).grid(row=6,column=8)
Label(fourthpage1,text='Yes').grid(row=6,column=9)
Radiobutton(fourthpage1,value=0,variable=showresults).grid(row=7,column=8)
Label(fourthpage1,text='No').grid(row=7,column=9)
############################# COMMANDS
def save(event=None):
    """Persist every GUI setting to savefile.txt, one value per line.

    The line order is the load order used elsewhere (savedfile indices),
    so it must not change: 16 scalar options, button1-25, folder1-18,
    file1-32, then gammadistdep and pagenumber.
    """
    ordered = [nummode, modeens, confens, rmsdens, modetra, steptra, rmsdtra,
               massnomass, startpage, gammacutoff, cutoff1, gamma2, cutoff2,
               showresults, coordset, autoname]
    ordered += [globals()['button%d' % i] for i in range(1, 26)]
    ordered += [globals()['folder%d' % i] for i in range(1, 19)]
    ordered += [globals()['file%d' % i] for i in range(1, 33)]
    ordered += [gammadistdep, pagenumber]
    with open(path + 'savefile.txt', 'w') as fout:
        # One value per line, with a trailing newline after the last entry
        # (matches the legacy format exactly).
        fout.write('\n'.join(var.get() for var in ordered) + '\n')
def default(event=None):
    """Reset every preference variable to its factory value.

    Each entry maps a module-level StringVar name to the string it should
    hold; the variables themselves are created where the pages are built.
    """
    factory = {
        'nummode': '20', 'modeens': '7,8', 'confens': '12', 'rmsdens': '3.0',
        'modetra': '7', 'steptra': '5', 'rmsdtra': '3.0', 'massnomass': '0',
        'startpage': '0', 'gammacutoff': '0', 'cutoff1': '100.0',
        'gamma2': '1.0', 'cutoff2': '13.0', 'showresults': '0',
        'coordset': '1', 'autoname': '1',
        # Keyboard shortcuts (Tk event-sequence names).
        'button1': 'Control-b', 'button2': 'Return', 'button3': 'Control-e',
        'button4': 'Control-s', 'button5': 'Control-q',
        'button6': 'Control-Shift-O', 'button7': 'Control-Shift-X',
        'button8': 'Control-Shift-V', 'button9': 'Control-i',
        'button10': 'Control-a', 'button11': 'Control-m',
        'button12': 'Control-Shift-C', 'button13': 'Control-f',
        'button14': 'Control-n', 'button15': 'Control-p',
        'button16': 'Control-Up', 'button17': 'Control-Down',
        'button18': 'Control-Right', 'button19': 'Control-Left',
        'button20': 'Up', 'button21': 'Down',
        'button22': '', 'button23': '', 'button24': '', 'button25': '',
        # Output folder names.
        'folder1': 'Ca_ANM', 'folder2': 'Ca_GNM', 'folder3': 'Heavy_ANM',
        'folder4': 'Heavy_GNM', 'folder5': 'All_ANM', 'folder6': 'All_GNM',
        'folder7': 'Backbone_ANM', 'folder8': 'Backbone_GNM',
        'folder9': 'Sidechain_ANM', 'folder10': 'Sidechain_GNM',
        'folder11': 'Correlation', 'folder12': 'SqFlucts',
        'folder13': 'Overlap', 'folder14': 'Modes',
        'folder15': 'Collectivity', 'folder16': 'Temp',
        'folder17': 'PhiPsi', 'folder18': 'NMD',
        # Output file stems and extensions.
        'file1': '_correlation', 'file2': 'txt',
        'file3': '_correlation_plot', 'file4': 'png',
        'file5': '_sqflucts', 'file6': 'txt', 'file7': '_sqflucts',
        'file8': 'png', 'file9': '_overlap', 'file10': 'txt',
        'file11': '_overlap', 'file12': 'png', 'file13': '_overlap_table',
        'file14': 'txt', 'file15': '_overlap_table', 'file16': 'png',
        'file17': '_modes', 'file18': 'txt', 'file19': '_mode',
        'file20': 'png', 'file21': '_collectivity', 'file22': 'txt',
        'file23': '_TempFactors', 'file24': 'txt', 'file25': 'average',
        'file26': 'txt', 'file27': '_sample', 'file28': '_traverse',
        'file29': '', 'file30': '', 'file31': '_FractOfVariances',
        'file32': 'png',
        'pagenumber': '1', 'gammadistdep': '-2',
    }
    for name, value in factory.items():
        globals()[name].set(value)
def quit(event=None):
    """Close the preferences window (bound to the configured Quit shortcut).

    NOTE: deliberately shadows the builtin ``quit``; the Button commands and
    root.bind calls below reference this name, so it must not be renamed.
    """
    root.destroy()
try:
    # Bind the saved Quit shortcut. savedfile[20] may hold an empty or
    # invalid Tk event sequence, in which case bind() raises and the
    # shortcut is simply not installed.
    root.bind('<'+savedfile[20]+'>',quit)
except:
    # `mer` is a throwaway sentinel; the bare except silently skips bad keys.
    mer=0
try:
    # Same for the Save shortcut (savedfile[19]).
    root.bind('<'+savedfile[19]+'>',save)
except:
    mer=0
def clear():
    """Hide all four settings pages so the caller can re-pack exactly one."""
    for _page in (firstpage1, secondpage1, thirdpage1, fourthpage1):
        _page.pack_forget()
def firstpage():
    """Show the parameters page (page 1) and restyle the tab buttons."""
    clear()
    pagenumber.set('1')
    # Active tab is flattened/light; the other three are sunken/darker.
    parabutton1.config(relief=FLAT,bg='gray85')
    for _tab in (filenamebutton1, controlsbutton1, miscbutton1):
        _tab.config(relief=SUNKEN,bg='gray70')
    firstpage1.pack(side='left')
def secondpage():
    """Show the filenames page (page 2) and restyle the tab buttons."""
    clear()
    pagenumber.set('2')
    filenamebutton1.config(relief=FLAT,bg='gray85')
    for _tab in (parabutton1, miscbutton1, controlsbutton1):
        _tab.config(relief=SUNKEN,bg='gray70')
    secondpage1.pack(side='left')
def thirdpage():
    """Show the controls page (page 3) and restyle the tab buttons."""
    clear()
    pagenumber.set('3')
    controlsbutton1.config(relief=FLAT,bg='gray85')
    for _tab in (miscbutton1, parabutton1, filenamebutton1):
        _tab.config(relief=SUNKEN,bg='gray70')
    thirdpage1.pack(side='left')
def fourthpage():
    """Show the misc page (page 4) and restyle the tab buttons."""
    clear()
    pagenumber.set('4')
    miscbutton1.config(relief=FLAT,bg='gray85')
    for _tab in (filenamebutton1, controlsbutton1, parabutton1):
        _tab.config(relief=SUNKEN,bg='gray70')
    fourthpage1.pack(side='left')
#
# Save / Default / Quit buttons are repeated on every page so the commands
# stay reachable regardless of which tab is visible.
Button(firstpage1,text='Save',command=save).grid(row=6,column=3,rowspan=2,columnspan=2)
Button(firstpage1,text='Default',command=default).grid(row=6,column=5,rowspan=2)
Button(firstpage1,text='Quit',command=quit).grid(row=6,column=6,rowspan=2,columnspan=2)
#
Button(secondpage1,text='Save',command=save).grid(row=20,column=0)
Button(secondpage1,text='Default',command=default).grid(row=20,column=2)
Button(secondpage1,text='Quit',command=quit).grid(row=20,column=5)
#
Button(thirdpage1,text='Save',command=save).grid(row=26,column=0)
Button(thirdpage1,text='Default',command=default).grid(row=26,column=1)
Button(thirdpage1,text='Quit',command=quit).grid(row=26,column=2)
#
Button(fourthpage1,text='Save',command=save).grid(row=9,column=3,rowspan=2,columnspan=2)
Button(fourthpage1,text='Default',command=default).grid(row=9,column=5,rowspan=2)
Button(fourthpage1,text='Quit',command=quit).grid(row=9,column=6,rowspan=2,columnspan=2)
#
def pagenumberpage():
    """Display whichever page the pagenumber variable names.

    Any value other than '2'/'3'/'4' (including '1') falls back to page 1,
    matching the original if/elif chain.
    """
    _dispatch = {'2': secondpage, '3': thirdpage, '4': fourthpage}
    _dispatch.get(pagenumber.get(), firstpage)()
def pagenumberright(event=None):
    """Advance to the next settings page, wrapping 4 -> 1."""
    # n % 4 + 1 maps 1->2, 2->3, 3->4, 4->1 — same cycle as the original
    # explicit '4' check.
    pagenumber.set(str(int(pagenumber.get()) % 4 + 1))
    pagenumberpage()
def pagenumberleft(event=None):
    """Step back to the previous settings page, wrapping 1 -> 4."""
    # (n - 2) % 4 + 1 maps 1->4, 2->1, 3->2, 4->3 — same cycle as the
    # original explicit '1' check.
    pagenumber.set(str((int(pagenumber.get()) - 2) % 4 + 1))
    pagenumberpage()
try:
    # Bind the page-cycling shortcuts (savedfile[37]/[38]); skip silently
    # if the stored key sequence is invalid.
    root.bind('<'+savedfile[37]+'>',pagenumberright)
except:
    mer=0
try:
    root.bind('<'+savedfile[38]+'>',pagenumberleft)
except:
    mer=0
# Restore the last-viewed page (savedfile[92]) and enter the Tk event loop.
pagenumber.set(savedfile[92])
pagenumberpage()
root.mainloop()
|
marmy28/NOMA
|
saving.py
|
Python
|
gpl-2.0
| 26,562
|
[
"VMD"
] |
2513545f8012230c5fb52c03eaf55a620c610deebe79e16e6bc59bedb57f9aed
|
# Checks all psi4 relevant files for proper boilerplate GNU license.
# This is sold as is with no warranty -- you should probably double-check everything
# after running. I am not responsible if you break Psi4.
#
# Do not forget to do share/plugins by hand!
import os
# File type we know how to handle
ftypes = ['cc', 'h', 'py']
c_header ="""/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2017 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/"""
py_header = c_header.replace(' */', '#')
py_header = py_header.replace('/*', '#')
py_header = py_header.replace(' *', '#')
c_header = c_header.splitlines()
py_header = py_header.splitlines()
def check_header(infile):
    """Rewrite the license block of *infile* in place with the canonical header.

    Prints a diagnostic and leaves the file untouched when no license block
    is found, when its "wings" (the comment lines framing @BEGIN/@END) are
    malformed, or when the block does not look like the Psi4 license.

    Fixes over the previous version: the file extension is derived from the
    *infile* argument instead of the module-global ``filename`` set by the
    walk loop below (which only worked by accident); the bare ``except:`` is
    narrowed to the ``ValueError`` that ``list.index`` raises; the unused
    ``symbol``/``max_lines`` locals are removed; and a ``with`` block
    guarantees the handle is closed on every path.
    """
    suffix = infile.split('.')[-1]
    with open(infile, 'r+') as f:
        data = f.read().splitlines()
        # Locate the marker lines; list.index raises ValueError when absent.
        try:
            if suffix == 'py':
                start = data.index("# @BEGIN LICENSE") - 1
                end = data.index("# @END LICENSE") + 1
                wings_ok = data[start] == '#' and data[end] == '#'
            else:
                start = data.index(" * @BEGIN LICENSE") - 1
                end = data.index(" * @END LICENSE") + 1
                wings_ok = data[start] == '/*' and data[end] == ' */'
        except ValueError:
            print('Could not find license block in file %s' % infile)
            return
        if not wings_ok:
            print('Did not find "wings" of license block in file %s' % infile)
            return
        # Make sure the block actually looks like the Psi4 license before
        # overwriting anything.
        license_block = data[start:end + 1]
        top = any("PSI4:" in x.upper() for x in license_block[:5])
        bot = any("51 Franklin Street" in x for x in license_block[5:])
        if not (top and bot):
            print('Did not understand infile %s' % infile)
            return
        # Splice in the canonical header for this file type.
        if suffix in ['cc', 'h']:
            data[start:end + 1] = c_header
        elif suffix == 'py':
            data[start:end + 1] = py_header
        else:
            print('Did not understand infile end: %s' % infile)
            return
        # Write the modified contents back over the original file.
        f.seek(0)
        f.write("\n".join(data))
        f.truncate()
# Directories whose paths contain these substrings are vendored/third-party
# code and must keep their own licenses.
avoid_strings = ['qcdb', 'libJKFactory']
# Collect every file under the C++ sources and the Python driver.
walk = list(os.walk('../../src/'))
walk += list(os.walk('../python'))
for root, dirnames, filenames in walk:
    if any(x in root for x in avoid_strings):
        continue
    for filename in filenames:
        # Only touch extensions we know how to relicense (cc/h/py).
        if filename.split('.')[-1] not in ftypes:
            continue
        check_header(root + '/' + filename)
|
kratman/psi4public
|
psi4/share/psi4/scripts/apply_license.py
|
Python
|
gpl-2.0
| 3,531
|
[
"Psi4"
] |
817e4660ca44edf1177bf3b633bbd26371cf18ec5fc581c548cf7b6a043cc285
|
#!/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Georgios Is. Detorakis (gdetor@gmail.com) and
# Nicolas P. Rougier (nicolas.rougier@inria.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompanying the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# DNF-2D-SOM-REF.py generates the topographic maps of during an intensive
# training of area 3b as it is described in [1].
#
# Computational reproduction of DiCarlo et al., 1998 experimental protocol.
# The model is explained in [1].
#
# -----------------------------------------------------------------------------
# Structure of Receptive Fields in Area 3b of Primary Somatosensory Cortex in
# the Alert Monkey - James J. DiCarlo, Kenneth O. Johnson, and Steven S. Hsiao
# The Journal of Neuroscience, April 1, 1998, 18(7):2626-2645
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pylab as plt
from numpy.fft import rfft2, irfft2, ifftshift
# -----------------------------------------------------------------------------
def grid(n, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, noise=0.0):
    """Return an (n*n, 2) array of receptor positions on a jittered grid.

    Starts from a regular n x n lattice spanning [xmin, xmax] x [ymin, ymax],
    adds uniform jitter of amplitude `noise`, and re-draws the jitter of any
    coordinate that fell outside the bounds until all points are inside.

    NOTE(review): the resampling loops consume the global NumPy RNG stream,
    so the exact output depends on the order of these calls — do not reorder.
    """
    # _X/_Y hold the unjittered lattice; X/Y are the jittered copies.
    _X = (np.resize(np.linspace(xmin,xmax,n),(n,n))).ravel()
    _Y = (np.resize(np.linspace(ymin,ymax,n),(n,n)).T).ravel()
    X = _X + np.random.uniform(-noise, noise, n*n)
    Y = _Y + np.random.uniform(-noise, noise, n*n)
    # Re-jitter X coordinates that escaped the x-range.
    Imin, Imax = np.argwhere(X < xmin), np.argwhere(X > xmax)
    while len(Imin) or len(Imax):
        X[Imin] = _X[Imin] + np.random.uniform(-noise, noise, len(Imin))
        X[Imax] = _X[Imax] + np.random.uniform(-noise, noise, len(Imax))
        Imin, Imax = np.argwhere(X < xmin), np.argwhere(X > xmax)
    # Same clamp-by-resampling for Y.
    Imin, Imax = np.argwhere(Y < ymin), np.argwhere(Y > ymax)
    while len(Imin) or len(Imax):
        Y[Imin] = _Y[Imin] + np.random.uniform(-noise, noise, len(Imin))
        Y[Imax] = _Y[Imax] + np.random.uniform(-noise, noise, len(Imax))
        Imin, Imax = np.argwhere(Y < ymin), np.argwhere(Y > ymax)
    # Pack the two coordinate vectors into one (n*n, 2) array.
    Z = np.zeros((n*n, 2))
    Z[:,0], Z[:,1] = X.ravel(), Y.ravel()
    return Z
# Receptors regular grid. Jitter can be added.
def grid_toric(n, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, noise=0.0):
    """Return flattened X, Y coordinates of an n x n receptor grid on a torus.

    Uniform jitter of amplitude `noise` is added to each coordinate and the
    result is wrapped back into [0, 1), so the grid topology stays toric.
    """
    xs = np.linspace(xmin, xmax, n, endpoint=False)
    ys = np.linspace(ymin, ymax, n, endpoint=False)
    gx, gy = np.meshgrid(xs, ys)
    # Jitter each axis, then wrap into [0, 1). The +1 guards against small
    # negative values before the modulo.
    gx += np.random.uniform(-noise, noise, (n, n))
    gx = np.mod(gx + 1, 1)
    gy += np.random.uniform(-noise, noise, (n, n))
    gy = np.mod(gy + 1, 1)
    return gx.ravel(), gy.ravel()
def g(x, sigma=0.1):
    """Gaussian tuning curve exp(-x^2 / sigma^2); accepts scalars or arrays."""
    return np.exp(-np.square(x) / sigma ** 2)
def fromdistance(fn, shape, center=None, dtype=float):
    """Apply *fn* to an array of normalized distances from *center*.

    Builds an array of the given *shape* whose entries are the Euclidean
    distance of each index from *center*, normalized per-axis by the axis
    extent and overall by sqrt(ndim) so values lie in [0, 1], then returns
    ``fn`` of that array. *center* defaults to the middle index.

    Fix: the previous ``center == None`` comparison breaks (elementwise
    comparison / ambiguous truth value) when *center* is a NumPy array;
    ``is None`` is the correct identity test. The check is also hoisted
    above the closure for clarity (the closure captured the later binding
    anyway, so behavior is unchanged).
    """
    if center is None:
        center = np.array(list(shape)) // 2
    def distance(*args):
        # Per-axis normalized squared offsets, summed then square-rooted.
        d = 0
        for i in range(len(shape)):
            d += ((args[i] - center[i]) / float(max(1, shape[i] - 1))) ** 2
        return np.sqrt(d) / np.sqrt(len(shape))
    return fn(np.fromfunction(distance, shape, dtype=dtype))
def Gaussian(shape, center, sigma=0.5):
    """Gaussian bump of width *sigma* over *shape*, peaked at *center*."""
    return fromdistance(lambda x: np.exp(-x ** 2 / sigma ** 2), shape, center)
def generate_input(R, S):
    """
    Given a grid of receptors R and an array of stimulus positions S,
    return each receptor's activation: the Gaussian response g(.) to its
    nearest stimulus. An empty stimulus set yields all-zero activations.
    """
    if not len(S):
        return np.zeros(R.shape[0])
    # Pairwise |dx|, |dy| between every stimulus (rows) and receptor (cols).
    dx = np.abs(R[:,0].reshape(1,len(R)) - S[:,0].reshape(len(S),1))
    dy = np.abs(R[:,1].reshape(1,len(R)) - S[:,1].reshape(len(S),1))
    dist = np.sqrt(dx*dx + dy*dy) / np.sqrt(2)
    # Strongest (closest-stimulus) response per receptor.
    return g(dist).max(axis=0)
def h(x, sigma=1.0):
    """Gaussian kernel exp(-(x/sigma)^2 / 2) used for the lateral weights."""
    return np.exp(-0.5 * np.square(x / sigma))
def stimulus_detection( S ):
    """Return 1 if activity S is concentrated in the central window, else 0.

    NOTE(review): the 6:19 slice hard-codes the central 13x13 window of a
    32x32 map (Sn = 32 below) — confirm if Sn ever changes. Python 2 print
    statement kept as-is; this script is Python 2 throughout.
    """
    # Activity inside the central window vs. everything outside it.
    ins = S[6:19,6:19].sum()
    out = S.sum() - ins
    print out, ins
    if ins > out:
        return 1
    else:
        return 0
def activity_area(data):
    """Count the strictly positive entries of *data* (any array shape)."""
    return sum(1 for value in data.ravel() if value > 0)
# -----------------------------------------------------------------------------
# Main driver: simulate the rotating-drum protocol of DiCarlo et al. (1998)
# while training the SOM, periodically saving weights and receptive fields.
if __name__ == '__main__':
    # Fixed seed so runs are reproducible (RNG order matters everywhere below).
    np.random.seed(137)
    # Standard units
    # --------------
    alpha, tau = 0.1, 1.0
    second = 1.0
    millisecond = 1e-3 * second
    ms = millisecond
    minute = 60 * second
    meter = 1.0
    millimeter = 1e-3 * meter
    mm = millimeter
    micrometer = 1e-6 * meter
    # Simulation parameters
    # ---------------------
    dots_number = 500
    drum_length = 250*mm
    drum_width = 30*mm
    drum_shift = 200*micrometer
    drum_velocity = 40*mm / second
    simulation_time = 5*minute
    sampling_rate = 10*ms
    dt = sampling_rate
    skinpatch = 10*mm,10*mm # width x height
    RF_sampling = 25,25
    plot = False
    Rn = 16
    # Jittered 16x16 receptor grid on the skin patch.
    R = grid(Rn,noise=0.05)
    # Generate the drum pattern
    # -------------------------
    drum = np.zeros( (dots_number,2) )
    drum[:,0] = np.random.uniform(0,drum_length,dots_number)
    drum[:,1] = np.random.uniform(0,drum_width, dots_number)
    drum_x,drum_y = drum[:,0], drum[:,1]
    print "Estimated number of samples: %d" % (simulation_time/dt)
    # SOM learning
    # -------------
    Sn = 32
    # Feed-forward weights: one Rn*Rn receptor vector per SOM unit.
    W = np.random.uniform(0,1,(Sn*Sn,Rn*Rn))
    RF_count = np.zeros((Sn,Sn,25,25))
    RF_sum = np.zeros((Sn,Sn,25,25))
    global_count = np.zeros((Sn,Sn))
    global_sum = np.zeros((Sn,Sn))
    U = np.random.random((Sn,Sn)) * .01
    V = np.random.random((Sn,Sn)) * .01
    x_inf, x_sup, y_inf, y_sup = 0.0, 1.0, 0.0, 1.0
    X, Y = np.meshgrid( np.linspace(x_inf,x_sup,Sn+1,endpoint=True)[1:],
                        np.linspace(y_inf,y_sup,Sn+1,endpoint=True)[1:] )
    # Mexican-hat lateral connectivity (excitatory minus inhibitory), applied
    # via FFT convolution below.
    D = np.sqrt( (X-0.5)**2 + (Y-0.5)**2 )
    We = 3.65*960.0/(32*32) * h( D, 0.1 )
    Wi = 2.40*960.0/(32*32) * h( D, 1.0 )
    We_fft = rfft2( ifftshift( We[::-1,::-1] ) )
    Wi_fft = rfft2( ifftshift( Wi[::-1,::-1] ) )
    # `time` is the neural-field settling duration per stimulus (seconds);
    # it shadows nothing imported in this script.
    time = 10.0
    folder = '/home/Local/SOM/Attention/Dicarlo/'
    # Run the simulated drum
    ii = 0
    for t in np.arange(0.0,simulation_time,dt):
        # Drum position: x wraps along the drum, each wrap shifts y by 200um.
        z = t * drum_velocity
        x = z % (drum_length - skinpatch[0])
        y = int(z / (drum_length - skinpatch[0])) * drum_shift
        # Maybe this should be adjusted since a stimulus lying outside the skin
        # patch may still have influence on the input (for example, if it lies
        # very near the border)
        xmin, xmax = x, x+skinpatch[0]
        ymin, ymax = y, y+skinpatch[1]
        # Get dots contained on the skin patch (and normalize coordinates)
        dots = drum[(drum_x > (xmin)) *
                    (drum_x < (xmax)) *
                    (drum_y > (ymin)) *
                    (drum_y < (ymax))]
        dots -= (x,y)
        dots /= skinpatch[0],skinpatch[1]
        # Compute RF mask
        RF_mask = np.zeros(RF_sampling)
        for dot in dots:
            index = (np.floor(dot*RF_sampling)).astype(int)
            RF_mask[index[1],index[0]] = 1
        # Compute corresponding input (according to receptors)
        S = generate_input(R,dots)
        # Generate the som answer
        D = (( np.abs( W - S )).sum(axis=-1))/float(Rn*Rn)
        I = ( 1.0 - D.reshape(Sn,Sn) ) * alpha
        # Let the neural field settle under input I before learning.
        for jj in range( int(time/dt) ):
            Z = rfft2( V * alpha )
            Le = irfft2( Z * We_fft, (Sn,Sn) ).real
            Li = irfft2( Z * Wi_fft, (Sn,Sn) ).real
            U += ( -U + ( Le - Li ) + I )* dt * tau
            V = np.maximum( U, 0.0 )
        # Hebbian-like update gated by the excitatory lateral drive.
        W -= 0.05 * ( Le.ravel() * ( W - S ).T ).T
        if ii%50==0:
            print ii
            np.save( folder+'weights'+str( '%06d' % ii ), W )
        # Compute the mean firing rate
        global_sum += V
        global_count += 1
        # Compute the local mean firing rate
        RF_sum += V.reshape(Sn,Sn,1,1)*RF_mask
        RF_count += RF_mask
        # Reset activity for the next stimulus presentation.
        U = np.random.random((Sn,Sn)) * .01
        V = np.random.random((Sn,Sn)) * .01
        mean = global_sum/(1+global_count)
        RFs = RF_sum/(1+RF_count) - mean.reshape(Sn,Sn,1,1)
        ii += 1
    # Final snapshots of weights and receptive-field statistics.
    np.save( folder+'weights'+str( '%06d' % ii ), W )
    np.save( folder+'RFs.npy', RFs)
    np.save( folder+'RF_sum', RF_sum )
    np.save( folder+'RF_count', RF_count )
|
gdetor/SI-RF-Structure
|
DiCarloProtocol/dicarlo-learning.py
|
Python
|
gpl-3.0
| 9,758
|
[
"Gaussian"
] |
8794cfb6d5b5afee21796b4e74405eca99b7eebb826dce7f0cdffbb34be08068
|
"""Support code for working with Shippable."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
from .. import types as t
from ..config import (
CommonConfig,
TestConfig,
)
from ..git import (
Git,
)
from ..http import (
HttpClient,
urlencode,
)
from ..util import (
ApplicationError,
display,
MissingEnvironmentVariable,
SubprocessError,
)
from . import (
AuthContext,
ChangeDetectionNotSupported,
CIProvider,
OpenSSLAuthHelper,
)
CODE = 'shippable'
class Shippable(CIProvider):
    """CI provider implementation for Shippable."""
    def __init__(self):
        # Signs Ansible Core CI requests; see prepare_core_ci_auth below.
        self.auth = ShippableAuthHelper()
    @staticmethod
    def is_supported(): # type: () -> bool
        """Return True if this provider is supported in the current running environment."""
        return os.environ.get('SHIPPABLE') == 'true'
    @property
    def code(self): # type: () -> str
        """Return a unique code representing this provider."""
        return CODE
    @property
    def name(self): # type: () -> str
        """Return descriptive name for this provider."""
        return 'Shippable'
    def generate_resource_prefix(self): # type: () -> str
        """Return a resource prefix specific to this CI provider.

        Raises MissingEnvironmentVariable if the Shippable build/job
        variables are absent.
        """
        try:
            prefix = 'shippable-%s-%s' % (
                os.environ['SHIPPABLE_BUILD_NUMBER'],
                os.environ['SHIPPABLE_JOB_NUMBER'],
            )
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])
        return prefix
    def get_base_branch(self): # type: () -> str
        """Return the base branch (prefixed with 'origin/') or an empty string."""
        base_branch = os.environ.get('BASE_BRANCH')
        if base_branch:
            base_branch = 'origin/%s' % base_branch
        return base_branch or ''
    def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
        """Initialize change detection.

        Returns the list of changed paths, or None when change detection
        cannot narrow the target set (all tests should then run).
        """
        result = ShippableChanges(args)
        if result.is_pr:
            job_type = 'pull request'
        elif result.is_tag:
            job_type = 'tag'
        else:
            job_type = 'merge commit'
        display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
        # Only populate metadata once; callers may have supplied changes already.
        if not args.metadata.changes:
            args.metadata.populate_changes(result.diff)
        if result.paths is None:
            # There are several likely causes of this:
            # - First run on a new branch.
            # - Too many pull requests passed since the last merge run passed.
            display.warning('No successful commit found. All tests will be executed.')
        return result.paths
    def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
        """Return True if Ansible Core CI is supported."""
        return True
    def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
        """Return authentication details for Ansible Core CI.

        The request payload identifies this run/job and is signed by the
        OpenSSL auth helper before being wrapped for transport.
        """
        try:
            request = dict(
                run_id=os.environ['SHIPPABLE_BUILD_ID'],
                job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
            )
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])
        self.auth.sign_request(request)
        auth = dict(
            shippable=request,
        )
        return auth
    def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
        """Return details about git in the current environment."""
        commit = os.environ.get('COMMIT')
        base_commit = os.environ.get('BASE_COMMIT')
        details = dict(
            base_commit=base_commit,
            commit=commit,
            merged_commit=self._get_merged_commit(args, commit),
        )
        return details
    # noinspection PyUnusedLocal
    def _get_merged_commit(self, args, commit): # type: (CommonConfig, str) -> t.Optional[str] # pylint: disable=unused-argument
        """Find the merged commit that should be present.

        For pull requests HEAD is expected to be a two-parent merge of the
        PR commit into the base branch; returns the other parent, or None
        when HEAD is the commit itself (not a PR, or base is up-to-date).
        """
        if not commit:
            return None
        git = Git()
        try:
            show_commit = git.run_git(['show', '--no-patch', '--no-abbrev', commit])
        except SubprocessError as ex:
            # This should only fail for pull requests where the commit does not exist.
            # Merge runs would fail much earlier when attempting to checkout the commit.
            raise ApplicationError('Commit %s was not found:\n\n%s\n\n'
                                   'GitHub may not have fully replicated the commit across their infrastructure.\n'
                                   'It is also possible the commit was removed by a force push between job creation and execution.\n'
                                   'Find the latest run for the pull request and restart failed jobs as needed.'
                                   % (commit, ex.stderr.strip()))
        head_commit = git.run_git(['show', '--no-patch', '--no-abbrev', 'HEAD'])
        if show_commit == head_commit:
            # Commit is HEAD, so this is not a pull request or the base branch for the pull request is up-to-date.
            return None
        match_merge = re.search(r'^Merge: (?P<parents>[0-9a-f]{40} [0-9a-f]{40})$', head_commit, flags=re.MULTILINE)
        if not match_merge:
            # The most likely scenarios resulting in a failure here are:
            # A new run should or does supersede this job, but it wasn't cancelled in time.
            # A job was superseded and then later restarted.
            raise ApplicationError('HEAD is not commit %s or a merge commit:\n\n%s\n\n'
                                   'This job has likely been superseded by another run due to additional commits being pushed.\n'
                                   'Find the latest run for the pull request and restart failed jobs as needed.'
                                   % (commit, head_commit.strip()))
        parents = set(match_merge.group('parents').split(' '))
        if len(parents) != 2:
            raise ApplicationError('HEAD is a %d-way octopus merge.' % len(parents))
        if commit not in parents:
            raise ApplicationError('Commit %s is not a parent of HEAD.' % commit)
        parents.remove(commit)
        last_commit = parents.pop()
        return last_commit
class ShippableAuthHelper(OpenSSLAuthHelper):
    """
    Authentication helper for Shippable.
    Based on OpenSSL since cryptography is not provided by the default Shippable environment.
    """
    def publish_public_key(self, public_key_pem):  # type: (str) -> None
        """Publish the given public key by writing it to the job log."""
        # Collapse the PEM onto one line so per-line log decoration (such as
        # timestamp prefixes) cannot mangle it.
        flattened = public_key_pem.replace('\n', ' ')
        display.info(flattened)
        # Give the CI log pipeline a moment to surface the key, reducing
        # repeated API polling by consumers.
        time.sleep(3)
class ShippableChanges:
    """Change information for Shippable build."""
    def __init__(self, args): # type: (CommonConfig) -> None
        self.args = args
        self.git = Git()
        try:
            self.branch = os.environ['BRANCH']
            self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
            self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
            self.commit = os.environ['COMMIT']
            self.project_id = os.environ['PROJECT_ID']
            self.commit_range = os.environ['SHIPPABLE_COMMIT_RANGE']
        except KeyError as ex:
            raise MissingEnvironmentVariable(name=ex.args[0])
        if self.is_tag:
            raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
        if self.is_pr:
            # Pull requests diff against the range Shippable provides.
            self.paths = sorted(self.git.get_diff_names([self.commit_range]))
            self.diff = self.git.get_diff([self.commit_range])
        else:
            # Merge runs diff against the most recent commit with a passing run.
            commits = self.get_successful_merge_run_commits(self.project_id, self.branch)
            last_successful_commit = self.get_last_successful_commit(commits)
            if last_successful_commit:
                self.paths = sorted(self.git.get_diff_names([last_successful_commit, self.commit]))
                self.diff = self.git.get_diff([last_successful_commit, self.commit])
            else:
                # first run for branch
                self.paths = None # act as though change detection not enabled, do not filter targets
                self.diff = []
    def get_successful_merge_run_commits(self, project_id, branch): # type: (str, str) -> t.Set[str]
        """Return a set of recent successful merge commits from Shippable for the given project and branch."""
        parameters = dict(
            isPullRequest='false',
            projectIds=project_id,
            branch=branch,
        )
        url = 'https://api.shippable.com/runs?%s' % urlencode(parameters)
        http = HttpClient(self.args, always=True)
        response = http.get(url)
        result = response.json()
        if 'id' in result and result['id'] == 4004:
            # most likely due to a private project, which returns an HTTP 200 response with JSON
            display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
            return set()
        # statusCode 30 is Shippable's "success" — TODO confirm against the Shippable API docs.
        commits = set(run['commitSha'] for run in result if run['statusCode'] == 30)
        return commits
    def get_last_successful_commit(self, successful_commits): # type: (t.Set[str]) -> t.Optional[str]
        """Return the last successful commit from git history that is found in the given commit list, or None."""
        # git history is newest-first, so the first intersection is the latest success.
        commit_history = self.git.get_rev_list(max_count=100)
        ordered_successful_commits = [commit for commit in commit_history if commit in successful_commits]
        last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
        return last_successful_commit
|
2ndQuadrant/ansible
|
test/lib/ansible_test/_internal/ci/shippable.py
|
Python
|
gpl-3.0
| 10,042
|
[
"Octopus"
] |
d02abd8638ddf731011fc24a89b7646414de1fb3cee5a575f1d26e313f2aa014
|
#!/usr/bin/env python3
import argparse
import multiprocessing
from itertools import product
import requests
# Command-line interface: the only required argument is the root URL of the
# Navigator instance whose tile cache should be warmed.
parser = argparse.ArgumentParser(description="Warm up the tile cache.")
parser.add_argument(
    "--url",
    type=str,
    required=True,
    help="Root URL to Navigator instance. Ex: https://navigator.oceansdata.ca or http://10.5.166.251:5000",
)
args = parser.parse_args()
# Base of the Navigator REST API used by all requests below.
base_url = f"{args.url}/api/v1.0"
def get_tile(
    dataset: str,
    variable: str,
    time: int,
    depth: int,
    scale: str,
    x: int,
    y: int,
    z: int,
) -> None:
    """Request a single map tile so the server renders and caches it, logging the outcome."""
    tile_url = (
        f"{base_url}/tiles/gaussian/25/10/EPSG:3857/"
        f"{dataset}/{variable}/{time}/{depth}/{scale}/{z}/{x}/{y}.png"
    )
    res = requests.get(tile_url, timeout=25)
    if res.status_code == 200:
        print(f"Fetched {res.url}")
    else:
        print(f"Error getting tile {res.status_code} -- {res.url}")
if __name__ == "__main__":
    print(f"Starting with {multiprocessing.cpu_count()} processes...")
    # Only warm tiles for datasets whose id mentions riops or giops.
    datasets = list(
        filter(
            lambda d: "riops" in d or "giops" in d,
            map(lambda d: d["id"], requests.get(f"{base_url}/datasets").json()),
        )
    )
    # Available timestamps, queried for a representative dataset/variable pair.
    timestamps = list(
        map(
            lambda t: int(t["id"]),
            requests.get(
                f"{base_url}/timestamps/?dataset=giops_day&variable=votemper"
            ).json(),
        )
    )
    # Tile pyramid coverage to pre-generate: zoom levels 1-7 over an 8x8 x/y grid.
    zoom_levels = [z for z in range(1, 8)]
    x_range = [x for x in range(0, 8)]
    y_range = [y for y in range(0, 8)]
    # Warm only the first timestamp returned, at depth index 0.
    nc_timestamp = [timestamps[0]]
    print(f"Fetching tiles for {nc_timestamp}")
    time = nc_timestamp
    depth = [0]
    with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
        # Each starmap_async call fans out one variable group, with the colour
        # scale appropriate to that variable, across the full
        # dataset x time x depth x tile-coordinate product.
        res1 = pool.starmap_async(
            get_tile,
            product(
                datasets,
                ["vozocrtx", "vomecrty"],
                time,
                depth,
                ["-3,3"],
                x_range,
                y_range,
                zoom_levels,
            ),
        )
        res2 = pool.starmap_async(
            get_tile,
            product(
                datasets,
                ["magwatervel"],
                time,
                depth,
                ["0,3"],
                x_range,
                y_range,
                zoom_levels,
            ),
        )
        res3 = pool.starmap_async(
            get_tile,
            product(
                datasets,
                ["votemper"],
                time,
                depth,
                ["-5,30"],
                x_range,
                y_range,
                zoom_levels,
            ),
        )
        res4 = pool.starmap_async(
            get_tile,
            product(
                datasets,
                ["vosaline"],
                time,
                depth,
                ["30,40"],
                x_range,
                y_range,
                zoom_levels,
            ),
        )
        # Block until every async batch has finished; must happen inside the
        # `with` block, since Pool.__exit__ terminates outstanding workers.
        res1.get()
        res2.get()
        res3.get()
        res4.get()
    print("Done!")
|
DFO-Ocean-Navigator/Ocean-Data-Map-Project
|
scripts/pregen_tiles.py
|
Python
|
gpl-3.0
| 3,057
|
[
"Gaussian"
] |
cccb764d4089161647ca00867f33909eab2bf66f796e150125c9ed40a24fb304
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-job-get-output-data
# Author : Stuart Paterson
########################################################################
"""
Retrieve the output data files of a DIRAC job
"""
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
    """Retrieve the output data files of one or more DIRAC jobs, optionally into a chosen directory."""
    Script.registerSwitch("D:", "Dir=", "Store the output in this directory")
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(["JobID: DIRAC Job ID"])
    switches, args = Script.parseCommandLine(ignoreErrors=True)

    from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments

    dirac = Dirac()

    # Pick up the destination directory if the -D/--Dir switch was supplied.
    outputDir = ""
    for switch, value in switches:
        if switch in ("D", "Dir"):
            outputDir = value

    exitCode = 0
    errorList = []
    for job in parseArguments(args):
        result = dirac.getJobOutputData(job, destinationDir=outputDir)
        if not result["OK"]:
            errorList.append((job, result["Message"]))
            exitCode = 2
        else:
            print("Job %s output data retrieved" % (job))

    # Report all failures after processing every job, then exit accordingly.
    for error in errorList:
        print("ERROR %s: %s" % error)

    DIRAC.exit(exitCode)


if __name__ == "__main__":
    main()
|
ic-hep/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_wms_job_get_output_data.py
|
Python
|
gpl-3.0
| 1,317
|
[
"DIRAC"
] |
027ab3748c451f0185fac1cdd286aed3e65b2faf85cf817f717d438b225e2704
|
"""
API for initiating and tracking requests for credit from a provider.
"""
import datetime
import logging
import pytz
import uuid
from django.db import transaction
from lms.djangoapps.django_comment_client.utils import JsonResponse
from openedx.core.djangoapps.credit.exceptions import (
UserIsNotEligible,
CreditProviderNotConfigured,
RequestAlreadyCompleted,
CreditRequestNotFound,
InvalidCreditStatus,
)
from openedx.core.djangoapps.credit.models import (
CreditProvider,
CreditRequirementStatus,
CreditRequest,
CreditEligibility,
)
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
from student.models import User
from util.date_utils import to_timestamp
log = logging.getLogger(__name__)
def get_credit_providers(providers_list=None):
    """Retrieve available credit providers as dictionaries.

    Arguments:
        providers_list (list of strings or None): ids of the credit providers
            to include, or None to include every available provider.

    Returns:
        list of credit providers represented as dictionaries, e.g.:

        >>> get_credit_providers(['hogwarts'])
        [
            {
                "id": "hogwarts",
                "name": "Hogwarts School of Witchcraft and Wizardry",
                "url": "https://credit.example.com/",
                "status_url": "https://credit.example.com/status/",
                "description: "A new model for the Witchcraft and Wizardry School System.",
                "enable_integration": false,
                "fulfillment_instructions": "...",
            },
            ...
        ]
    """
    # Retrieval and optional filtering are both handled by the model layer.
    return CreditProvider.get_credit_providers(providers_list=providers_list)
def get_credit_provider_info(request, provider_id):  # pylint: disable=unused-argument
    """Return a JsonResponse describing the given credit provider.

    Args:
        provider_id (str): The identifier for the credit provider

    Returns: JsonResponse wrapping the 'CreditProvider' data dictionary,
        or an empty dictionary when the provider does not exist, e.g.:

        >>> get_credit_provider_info("hogwarts")
        {
            "provider_id": "hogwarts",
            "display_name": "Hogwarts School of Witchcraft and Wizardry",
            "provider_url": "https://credit.example.com/",
            "provider_status_url": "https://credit.example.com/status/",
            "provider_description: "A new model for the Witchcraft and Wizardry School System.",
            "enable_integration": False,
            "fulfillment_instructions": "...",
            "thumbnail_url": "https://credit.example.com/logo.png"
        }
    """
    credit_provider = CreditProvider.get_credit_provider(provider_id=provider_id)
    if not credit_provider:
        return JsonResponse({})
    # Every response key matches the model attribute of the same name.
    fields = (
        "provider_id",
        "display_name",
        "provider_url",
        "provider_status_url",
        "provider_description",
        "enable_integration",
        "fulfillment_instructions",
        "thumbnail_url",
    )
    return JsonResponse({name: getattr(credit_provider, name) for name in fields})
@transaction.commit_on_success
def create_credit_request(course_key, provider_id, username):
    """
    Initiate a request for credit from a credit provider.

    This will return the parameters that the user's browser will need to POST
    to the credit provider. It does NOT calculate the signature.

    Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.

    A provider can be configured either with *integration enabled* or not.
    If automatic integration is disabled, this method will simply return
    a URL to the credit provider and method set to "GET", so the student can
    visit the URL and request credit directly. No database record will be created
    to track these requests.

    If automatic integration *is* enabled, then this will also return the parameters
    that the user's browser will need to POST to the credit provider.
    These parameters will be digitally signed using a secret key shared with the credit provider.

    A database record will be created to track the request with a 32-character UUID.
    The returned dictionary can be used by the user's browser to send a POST request to the credit provider.

    If a pending request already exists, this function should return a request description with the same UUID.
    (Other parameters, such as the user's full name may be different than the original request).

    If a completed request (either accepted or rejected) already exists, this function will
    raise an exception. Users are not allowed to make additional requests once a request
    has been completed.

    Arguments:
        course_key (CourseKey): The identifier for the course.
        provider_id (str): The identifier of the credit provider.
        username (str): The user initiating the request.

    Returns: dict

    Raises:
        UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
        CreditProviderNotConfigured: The credit provider has not been configured for this course.
        RequestAlreadyCompleted: The user has already submitted a request and received a response
            from the credit provider.

    Example Usage:
        >>> create_credit_request(course.id, "hogwarts", "ron")
        {
            "url": "https://credit.example.com/request",
            "method": "POST",
            "parameters": {
                "request_uuid": "557168d0f7664fe59097106c67c3f847",
                "timestamp": 1434631630,
                "course_org": "HogwartsX",
                "course_num": "Potions101",
                "course_run": "1T2015",
                "final_grade": "0.95",
                "user_username": "ron",
                "user_email": "ron@example.com",
                "user_full_name": "Ron Weasley",
                "user_mailing_address": "",
                "user_country": "US",
                "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
            }
        }
    """
    try:
        # The eligibility lookup also resolves the credit course record in one query.
        user_eligibility = CreditEligibility.objects.select_related('course').get(
            username=username,
            course__course_key=course_key
        )
        credit_course = user_eligibility.course
        credit_provider = CreditProvider.objects.get(provider_id=provider_id)
    except CreditEligibility.DoesNotExist:
        log.warning(
            u'User "%s" tried to initiate a request for credit in course "%s", '
            u'but the user is not eligible for credit',
            username, course_key
        )
        raise UserIsNotEligible
    except CreditProvider.DoesNotExist:
        log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
        raise CreditProviderNotConfigured

    # Check if we've enabled automatic integration with the credit
    # provider. If not, we'll show the user a link to a URL
    # where the user can request credit directly from the provider.
    # Note that we do NOT track these requests in our database,
    # since the state would always be "pending" (we never hear back).
    if not credit_provider.enable_integration:
        return {
            "url": credit_provider.provider_url,
            "method": "GET",
            "parameters": {}
        }
    else:
        # If automatic credit integration is enabled, then try
        # to retrieve the shared signature *before* creating the request.
        # That way, if there's a misconfiguration, we won't have requests
        # in our system that we know weren't sent to the provider.
        shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
        if shared_secret_key is None:
            msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
                provider_id=credit_provider.provider_id
            )
            log.error(msg)
            raise CreditProviderNotConfigured(msg)

        # Initiate a new request if one has not already been created
        credit_request, created = CreditRequest.objects.get_or_create(
            course=credit_course,
            provider=credit_provider,
            username=username,
        )

        # Check whether we've already gotten a response for a request,
        # If so, we're not allowed to issue any further requests.
        # Skip checking the status if we know that we just created this record.
        if not created and credit_request.status != "pending":
            log.warning(
                (
                    u'Cannot initiate credit request because the request with UUID "%s" '
                    u'exists with status "%s"'
                ), credit_request.uuid, credit_request.status
            )
            raise RequestAlreadyCompleted

        if created:
            # New requests get a 32-character hex UUID, used to correlate the
            # provider's eventual response with this request.
            credit_request.uuid = uuid.uuid4().hex

        # Retrieve user account and profile info
        user = User.objects.select_related('profile').get(username=username)

        # Retrieve the final grade from the eligibility table
        try:
            final_grade = unicode(CreditRequirementStatus.objects.get(
                username=username,
                requirement__namespace="grade",
                requirement__name="grade",
                requirement__course__course_key=course_key,
                status="satisfied"
            ).reason["final_grade"])
        except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
            # TypeError/KeyError cover a missing or malformed `reason` payload.
            log.exception(
                "Could not retrieve final grade from the credit eligibility table "
                "for user %s in course %s.",
                user.id, course_key
            )
            raise UserIsNotEligible

        parameters = {
            "request_uuid": credit_request.uuid,
            "timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
            "course_org": course_key.org,
            "course_num": course_key.course,
            "course_run": course_key.run,
            "final_grade": final_grade,
            "user_username": user.username,
            "user_email": user.email,
            "user_full_name": user.profile.name,
            "user_mailing_address": (
                user.profile.mailing_address
                if user.profile.mailing_address is not None
                else ""
            ),
            "user_country": (
                user.profile.country.code
                if user.profile.country.code is not None
                else ""
            ),
        }

        credit_request.parameters = parameters
        credit_request.save()

        if created:
            log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
        else:
            log.info(
                u'Updated request for credit with UUID "%s" so the user can re-issue the request',
                credit_request.uuid
            )

        # Sign the parameters using a secret key we share with the credit provider.
        # NOTE: the signature is added after save(), so it is never persisted.
        parameters["signature"] = signature(parameters, shared_secret_key)

        return {
            "url": credit_provider.provider_url,
            "method": "POST",
            "parameters": parameters
        }
def update_credit_request_status(request_uuid, provider_id, status):
    """
    Update the status of a credit request.

    Approve or reject a request for a student to receive credit in a course
    from a particular credit provider.

    This function does NOT check that the status update is authorized;
    the caller needs to handle authentication and authorization (checking the
    signature of the message received from the credit provider).

    The function is idempotent: updating a request to a status it already has
    does nothing beyond re-saving and logging.

    Arguments:
        request_uuid (str): The unique identifier for the credit request.
        provider_id (str): Identifier for the credit provider.
        status (str): Either "approved" or "rejected"

    Returns: None

    Raises:
        CreditRequestNotFound: No request exists that is associated with the given provider.
        InvalidCreditStatus: The status is not either "approved" or "rejected".
    """
    valid_statuses = (CreditRequest.REQUEST_STATUS_APPROVED, CreditRequest.REQUEST_STATUS_REJECTED)
    if status not in valid_statuses:
        raise InvalidCreditStatus

    # Keep the try-block minimal: only the lookup can raise DoesNotExist.
    try:
        credit_request = CreditRequest.objects.get(uuid=request_uuid, provider__provider_id=provider_id)
    except CreditRequest.DoesNotExist:
        msg = (
            u'Credit provider with ID "{provider_id}" attempted to '
            u'update request with UUID "{request_uuid}", but no request '
            u'with this UUID is associated with the provider.'
        ).format(provider_id=provider_id, request_uuid=request_uuid)
        log.warning(msg)
        raise CreditRequestNotFound(msg)

    previous_status = credit_request.status
    credit_request.status = status
    credit_request.save()
    log.info(
        u'Updated request with UUID "%s" from status "%s" to "%s" for provider with ID "%s".',
        request_uuid, previous_status, status, provider_id
    )
def get_credit_requests_for_user(username):
    """Retrieve all credit requests initiated by a user.

    Each request carries a "status" of "pending", "approved", or "rejected".

    Arguments:
        username (unicode): The username of the user who initiated the requests.

    Returns: list

    Example Usage:
        >>> get_credit_request_status_for_user("bob")
        [
            {
                "uuid": "557168d0f7664fe59097106c67c3f847",
                "timestamp": 1434631630,
                "course_key": "course-v1:HogwartsX+Potions101+1T2015",
                "provider": {
                    "id": "HogwartsX",
                    "display_name": "Hogwarts School of Witchcraft and Wizardry",
                },
                "status": "pending" # or "approved" or "rejected"
            }
        ]
    """
    # The model layer owns the query and serialization.
    return CreditRequest.credit_requests_for_user(username)
def get_credit_request_status(username, course_key):
    """Get the credit request status.

    Returns the latest credit-request status ('pending', 'approved' or
    'rejected') for the given user and course, across all credit providers.

    Args:
        username(str): The username of user
        course_key(CourseKey): The course locator key

    Returns:
        A dictionary describing the user's credit request, or an empty
        dictionary when the user has made no request for this course.
    """
    credit_request = CreditRequest.get_user_request_status(username, course_key)
    if not credit_request:
        return {}
    return {
        "uuid": credit_request.uuid,
        "timestamp": credit_request.modified,
        "course_key": credit_request.course.course_key,
        "provider": {
            "id": credit_request.provider.provider_id,
            "display_name": credit_request.provider.display_name
        },
        "status": credit_request.status
    }
|
rismalrv/edx-platform
|
openedx/core/djangoapps/credit/api/provider.py
|
Python
|
agpl-3.0
| 15,435
|
[
"VisIt"
] |
349bf20a119dacfbcedcbbfa4272422215860b859ede01896d4f04cc7386983e
|
'''test methods related to vagrant'''
from behave import *
import ConfigParser
# Read the UAT configuration to determine the remote account used for all
# vagrant-related ansible commands.
config = ConfigParser.ConfigParser()
config.read('config/uat.cfg')
remote_user = config.get("vagrant", "user")
# Comma-separated package list needed to build vagrant plugins from source.
vagrant_build_dependencies = "ruby,ruby-devel,ruby-libvirt,rubygem-ruby-libvirt,libvirt,libvirt-devel,rubygem-bundler,rubygem-bundler-doc,rubygem-nokogiri,libxml2-devel,libxslt-devel,rubygem-rake"
@given(u'vagrant plugin is "{plugin_name}"')
def step_impl(context, plugin_name):
    # Remember which plugin subsequent steps operate on.
    context.vagrant_plugin = plugin_name


@given(u'install vagrant plugin')
def step_impl(context):
    # Install the previously selected plugin on the target host.
    assert context.remote_cmd("command",
                              context.target_host,
                              remote_user=remote_user,
                              module_args="vagrant plugin install %s" % context.vagrant_plugin)


@then(u'vagrant plugin is verified as installed')
@given(u'vagrant plugin is verified as installed')
def step_impl(context):
    r = context.remote_cmd("command",
                           context.target_host,
                           remote_user=remote_user,
                           module_args="vagrant plugin list")
    # str.index raises ValueError when the plugin is absent, failing the step.
    for i in r:
        assert i['stdout'].index(context.vagrant_plugin) >= 0
@given(u'vagrant box "{box}" is already installed')
def step_impl(context, box, host="cihosts"):
    # List installed boxes and confirm the expected box name appears.
    r = context.remote_cmd("command",
                           host,
                           remote_user=remote_user,
                           module_args="vagrant box list")
    # str.index raises ValueError when the box is absent, failing the step.
    for i in r:
        assert i['stdout'].index(box) >= 0


@given(u'source of the plugin is cloned from "{url}"')
def step_impl(context, url):
    # Delegate to the generic clone step, using the previously selected plugin name.
    context.execute_steps(u"""
    Given clone "{project_name}" from "{url_name}"
    """.format(project_name=context.vagrant_plugin, url_name=url))


@given(u'Clone CDK from "{url}"')
def step_impl(context, url, host="cihosts"):
    # Clone the CDK repository into ~/cdk on the remote host.
    assert context.remote_cmd("git",
                              host,
                              remote_user=remote_user,
                              module_args="repo=%s dest=~/cdk" % url)


@when(u'Vagrantfile is linked')
def step_impl(context, host="cihosts"):
    # Symlink the standalone RHEL Vagrantfile into the home directory.
    assert context.remote_cmd("file",
                              host,
                              remote_user=remote_user,
                              module_args="src=~/cdk/components/standalone-rhel/Vagrantfile dest=~/Vagrantfile state=link")
@when(u'vagrant up')
def step_impl(context, host="cihosts"):
    # Bring up the vagrant environment on the remote host.
    assert context.remote_cmd("command",
                              host,
                              remote_user=remote_user,
                              module_args="vagrant up")


@then(u'vagrant connect to "{guest}"')
def step_impl(context, guest, host="cihosts"):
    # NOTE(review): this runs "vagrant up" rather than connecting to the
    # guest, and ignores `guest` -- looks like a copy-paste placeholder;
    # confirm the intended command (e.g. "vagrant ssh").
    assert context.remote_cmd("command",
                              host,
                              remote_user=remote_user,
                              module_args="vagrant up")


@then(u'vagrant "{guest}" is destroyed')
def step_impl(context, guest, host="cihosts"):
    # NOTE(review): this also runs "vagrant up" and ignores `guest`; a destroy
    # step would be expected to run "vagrant destroy" -- confirm intent.
    assert context.remote_cmd("command",
                              host,
                              remote_user=remote_user,
                              module_args="vagrant up")


@then(u'vagrant "{guest}" is auto-subscribed')
def step_impl(context, guest, host="cihosts"):
    # Verify the guest's subscription status via vagrant ssh.
    assert context.remote_cmd("command",
                              host,
                              remote_user=remote_user,
                              module_args="vagrant ssh -c 'sudo subscription-manager status'")


# requires querying the customer portal to find out if the registration was remove (the box it was on is gone)
# @then(u'vagrant "{guest}" is unsubscribed and unregistered')
# def step_impl(context, guest, host="cihosts"):
#     assert False
@given(u'vagrant is installed')
def step_impl(context, host="cihosts"):
    # Ensure the vagrant binary is present on the remote host.
    assert context.remote_cmd("command",
                              host,
                              remote_user=remote_user,
                              module_args="which vagrant")


# not sure why this doesn't work
@given(u'vagrant plugin build dependencies are installed')
def step_impl(context, host="cihosts"):
    # Reuse the generic package-installation step for the build dependencies.
    context.execute_steps(u"""
    given "{package_names}" are already installed on "{vagrant_host}"
    """.format(package_names=vagrant_build_dependencies, vagrant_host=context.target_host))


# def step_impl(context, vagrant_plugin, host="cihosts"):
@given(u'bundler has been used to install ruby dependencies')
def step_impl(context):
    # Configure nokogiri to build against system libraries, then install gems.
    assert context.remote_cmd("command",
                              context.target_host,
                              remote_user=remote_user,
                              module_args="cd ~/%s && bundle config build.nokogiri --use-system-libraries && bundle install" % context.vagrant_plugin)


@when(u'vagrant plugin is built')
def step_impl(context):
    # Build the plugin gem from its cloned source tree.
    assert context.remote_cmd("command",
                              context.target_host,
                              remote_user=remote_user,
                              module_args="cd ~/%s && rake build" % context.vagrant_plugin)


@then(u'local "vagrant-registration" gem is successfully installed')
def step_impl(context):
    # Verification is identical to the generic plugin-installed check.
    context.execute_steps(u"""
    given vagrant plugin is verified as installed
    """)
|
jlebon/UATFramework
|
steps/vagrant.py
|
Python
|
gpl-2.0
| 5,287
|
[
"CDK"
] |
0032dc9a68c2cff06ae604a6f171eebab47efcd88b88f8e486f0e77224960d5a
|
#-*- coding:utf-8 -*-
"""
This file is part of openexp.
openexp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
openexp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with openexp. If not, see <http://www.gnu.org/licenses/>.
"""
import random
import pygame
import math
from libopensesame.exceptions import osexception
# If available, use the yaml.inherit metaclass to copy the docstrings from
# canvas onto the back-end-specific implementations of this class (legacy, etc.)
try:
from yamldoc import inherit as docinherit
except:
docinherit = type
class canvas(object):
"""
desc: |
The `canvas` class is used for display presentation.
__Important note:__
When using a `canvas` all coordinates are specified
relative to the top-left of the display, and not, as in `sketchpad`s,
relative to the display center.
__Example__:
~~~ {.python}
# Create a canvas with a central fixation dot and show it.
from openexp.canvas import canvas
my_canvas = canvas(exp)
my_canvas.fixdot()
my_canvas.show()
~~~
__Function list:__
%--
toc:
mindepth: 2
maxdepth: 2
--%
%--
constant:
arg_fgcolor: |
A human-readable foreground color, such as 'red', an
HTML-style color value, such as '#FF0000', or `None` to use the
canvas default. This argument will not change the canvas default
foreground as set by [canvas.set_fgcolor].
arg_bgcolor: |
A human-readable background color, such as 'red', an
HTML-style color value, such as '#FF0000', or `None` to use the
canvas default. This argument will not change the canvas default
background as set by [canvas.set_bgcolor].
arg_penwidth: |
A penwidth in pixels, or `None` to use the canvas default. This
argument will not change the canvas default penwidth as set by
[canvas.set_penwidth].
arg_max_width: |
The maximum width of the text in pixels, before wrapping to a
new line, or `None` to wrap at screen edge.
arg_bidi: |
A bool indicating bi-directional text support should be enabled,
or `None` to use the experiment default. This does not affect
the canvas default bidi setting as set by [canvas.set_bidi].
arg_html: |
A bool indicating whether a subset of HTML tags should be
interpreted. For more information, see </usage/text/>.
arg_bgmode: |
Specifies whether the background is the average of col1 col2
('avg', corresponding to a typical Gabor patch), or equal to
col2 ('col2'), useful for blending into the background. Note:
this parameter is ignored by the psycho backend, which uses
increasing transparency for the background.
arg_fill: |
Specifies whether the shape should be filled (True) or consist
of an outline (False).
--%
"""
__metaclass__ = docinherit
def __init__(self, experiment, bgcolor=None, fgcolor=None,
    auto_prepare=True):

    """
    desc:
        Constructor to create a new `canvas` object.

    arguments:
        experiment:
            desc: The experiment object.
            type: experiment

    keywords:
        bgcolor:
            desc: A human-readable background color or None to use
                  experiment default.
            type: [str, unicode, NoneType]
        fgcolor:
            desc: A human-readable foreground color or None to use
                  experiment default.
            type: [str, unicode, NoneType]
        auto_prepare:
            desc: Indicates whether the canvas should be automatically
                  prepared after each drawing operation, so that
                  [canvas.show] will be maximally efficient. If
                  auto_prepare is turned off, drawing operations may
                  be faster, but [canvas.show] will take longer,
                  unless [canvas.prepare] is explicitly called in
                  advance. Generally, it only makes sense to disable
                  auto_prepare when you want to draw a large number
                  of stimuli, as in the second example below.
                  Currently, the auto_prepare parameter only applies
                  to the xpyriment backend, and is ignored by the
                  other backends.
            type: bool

    example: |
        # Example 1: Show a central fixation dot.
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.fixdot()
        my_canvas.show()

        # Example 2: Show many randomly positioned fixation dot. Here we
        # disable `auto_prepare`, so that drawing goes more quickly.
        from openexp.canvas import canvas
        from random import randint
        my_canvas = canvas(exp, auto_prepare=False)
        for i in range(1000):
            x = randint(0, self.get('width'))
            y = randint(0, self.get('height'))
            my_canvas.fixdot(x, y)
        my_canvas.prepare()
        my_canvas.show()
    """

    # Abstract: each display back-end provides its own constructor.
    raise NotImplementedError()
def color(self, color):

    """
    desc:
        Transforms a "human-readable" color into the format that is used by
        the back-end (e.g., a PyGame color).

    visible: False

    arguments:
        color: |
            A color in one the following formats (by example):

            - 255, 255, 255 (rgb)
            - 255, 255, 255, 255 (rgba)
            - #f57900 (case-insensitive html)
            - 100 (integer intensity value 0 .. 255, for gray-scale)
            - 0.1 (float intensity value 0 .. 1.0, for gray-scale)

    returns:
        A color in a back-end-specific format.
    """

    # Abstract: each display back-end supplies its own conversion.
    raise NotImplementedError()
def copy(self, canvas):

    """
    desc: |
        Turns the current `canvas` into a copy of the passed `canvas`.

        __Note:__

        If you want to create a copy of a `sketchpad` `canvas`, you can also
        use the `inline_script.copy_sketchpad` function.

    arguments:
        canvas:
            desc: The `canvas` to copy.
            type: canvas

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.fixdot(x=100, color='green')
        my_copied_canvas = canvas(exp)
        my_copied_canvas.copy(my_canvas)
        my_copied_canvas.fixdot(x=200, color="blue")
        my_copied_canvas.show()
    """

    # Abstract: each display back-end implements its own copy semantics.
    raise NotImplementedError()
def xcenter(self):

    """
    desc:
        Returns the center X coordinate of the `canvas` in pixels.

    returns:
        desc: The center X coordinate.
        type: int

    example: |
        # Draw a diagonal line through the center of the canvas
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        x1 = my_canvas.xcenter() - 100
        y1 = my_canvas.ycenter() - 100
        x2 = my_canvas.xcenter() + 100
        y2 = my_canvas.ycenter() + 100
        my_canvas.line(x1, y1, x2, y2)
    """

    # Floor division keeps the documented int return type: for int widths it
    # matches `/` under Python 2, and avoids a float result under Python 3.
    return self.experiment.get(u'width') // 2
def ycenter(self):

    """
    desc:
        Returns the center Y coordinate of the `canvas` in pixels.

    returns:
        desc: The center Y coordinate.
        type: int

    example: |
        # Draw a diagonal line through the center of the canvas
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        x1 = my_canvas.xcenter() - 100
        y1 = my_canvas.ycenter() - 100
        x2 = my_canvas.xcenter() + 100
        y2 = my_canvas.ycenter() + 100
        my_canvas.line(x1, y1, x2, y2)
    """

    # Floor division keeps the documented int return type: for int heights it
    # matches `/` under Python 2, and avoids a float result under Python 3.
    return self.experiment.get(u'height') // 2
def prepare(self):

    """
    desc:
        Finishes pending canvas operations (if any), so that a subsequent
        call to [canvas.show] is extra fast. It's only necessary to call
        this function if you have disabled `auto_prepare` in
        [canvas.__init__].
    """

    # No-op by default; a back-end may override this to flush buffered drawing.
    pass
def show(self):

    """
    desc:
        Shows, or 'flips', the canvas on the screen.

    returns:
        desc:
            A timestamp of the time at which the canvas actually appeared on
            the screen, or a best guess if precise temporal information is
            not available. For more information about timing, see
            </misc/timing>. Depending on the back-end the timestamp is an
            `int` or a `float`.
        type:
            [int, float]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.fixdot()
        t = my_canvas.show()
        exp.set('time_fixdot', t)
    """

    # Abstract: each display back-end implements its own flip.
    raise NotImplementedError()
def clear(self, color=None):

    """
    desc:
        Clears the canvas with the current background color. Note that it is
        generally faster to use a different canvas for each experimental
        display than to use a single canvas and repeatedly clear and redraw
        it.

    keywords:
        color:
            desc: "%arg_bgcolor"
            type: [str, unicode, NoneType]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.fixdot(color='green')
        my_canvas.show()
        self.sleep(1000)
        my_canvas.clear()
        my_canvas.fixdot(color='red')
        my_canvas.show()
    """

    # Abstract: each display back-end implements its own clearing.
    raise NotImplementedError()
def set_bidi(self, bidi):

    """
    desc:
        Enables or disables bi-directional text support.

    arguments:
        bidi:
            desc: True to enable bi-directional text support, False to
                  disable.
            type: bool

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.set_bidi(True)
        my_canvas.text(u'חלק מטקסט')
    """

    # Store the default used by subsequent drawing operations.
    self.bidi = bidi
def set_penwidth(self, penwidth):

    """
    desc:
        Sets the default penwidth for subsequent drawing operations.

    arguments:
        penwidth:
            desc: A penwidth in pixels.
            type: int

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.set_penwidth(10)
        my_canvas.line(100, 100, 200, 200)
    """

    # Store the default used by subsequent drawing operations.
    self.penwidth = penwidth
def set_fgcolor(self, color):

    """
    desc:
        Sets the default foreground color for subsequent drawing operations.

    arguments:
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.set_fgcolor('green')
        my_canvas.text('Green text', y=200)
        my_canvas.set_fgcolor('red')
        my_canvas.text('Red text', y=400)
    """

    # Convert to the back-end-specific color format before storing.
    self.fgcolor = self.color(color)
def set_bgcolor(self, color):

    """
    desc:
        Sets the default background color for subsequent drawing operations,
        notably [canvas.clear].

    arguments:
        color:
            desc: "%arg_bgcolor"
            type: [str, unicode]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.set_bgcolor('gray')
        my_canvas.clear()
    """

    # Convert to the back-end-specific color format before storing.
    self.bgcolor = self.color(color)
def set_font(self, style=None, size=None, italic=None, bold=None,
    underline=None):

    """
    desc:
        Sets the default font for subsequent drawing operations, notably
        [canvas.text].

    keywords:
        style:
            desc: A font family. This can be one of the default fonts
                  (e.g., 'mono'), a system font (e.g., 'arial'), the
                  name of a `.ttf` font file in the file pool (without
                  the `.ttf` extension), or `None` to use the experiment
                  default.
            type: [str, unicode]
        size:
            desc: A font size in pixels, or `None` to use the experiment
                  default.
            type: int
        italic:
            desc: A bool indicating whether the font should be italic, or
                  `None` to use the experiment default.
            type: bool, NoneType
        bold:
            desc: A bool indicating whether the font should be bold, or
                  `None` to use the experiment default.
            type: bool, NoneType
        underline:
            desc: A bool indicating whether the font should be underlined,
                  or `None` to use the experiment default.
            type: bool, NoneType

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.set_font(style='serif', italic=True)
        my_canvas.text('Text in italic serif')
    """

    # Only overwrite the settings that were explicitly provided. `is not None`
    # is the idiomatic None test (PEP 8) and avoids invoking custom __eq__.
    if style is not None:
        self.font_style = style
    if size is not None:
        self.font_size = size
    if italic is not None:
        self.font_italic = italic
    if bold is not None:
        self.font_bold = bold
    if underline is not None:
        self.font_underline = underline
def fixdot(self, x=None, y=None, color=None, style=u'default'):

    """
    desc: |
        Draws a fixation dot.

        - 'large-filled' is a filled circle with a 16px radius.
        - 'medium-filled' is a filled circle with an 8px radius.
        - 'small-filled' is a filled circle with a 4px radius.
        - 'large-open' is a filled circle with a 16px radius and a 2px hole.
        - 'medium-open' is a filled circle with an 8px radius and a 2px hole.
        - 'small-open' is a filled circle with a 4px radius and a 2px hole.
        - 'large-cross' is 16px cross.
        - 'medium-cross' is an 8px cross.
        - 'small-cross' is a 4px cross.

    keywords:
        x:
            desc: The X coordinate of the dot center, or None to draw a
                  horizontally centered dot.
            type: [int, NoneType]
        y:
            desc: The Y coordinate of the dot center, or None to draw a
                  vertically centered dot.
            type: [int, NoneType]
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        style:
            desc: |
                The fixation-dot style. One of: default, large-filled,
                medium-filled, small-filled, large-open, medium-open,
                small-open, large-cross, medium-cross, or small-cross.
                default equals medium-open.
            type: [str, unicode]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.fixdot()
    """

    # Fall back to the canvas defaults for unspecified keywords.
    if color is not None:
        color = self.color(color)
    else:
        color = self.fgcolor
    if x is None:
        x = self.xcenter()
    if y is None:
        y = self.ycenter()
    h = 2  # Radius of the hole in the 'open' styles.
    # Determine the dot radius from the size part of the style.
    if u'large' in style:
        s = 16
    elif u'medium' in style or style == u'default':
        s = 8
    elif u'small' in style:
        s = 4
    else:
        # Bug fix: this previously read `self.style`, which does not exist,
        # so an invalid style raised AttributeError instead of osexception.
        raise osexception(u'Unknown style: %s' % style)
    # Draw the shape part of the style.
    if u'open' in style or style == u'default':
        self.ellipse(x-s, y-s, 2*s, 2*s, True, color=color)
        # Punch the hole by drawing a small background-colored disk on top.
        self.ellipse(x-h, y-h, 2*h, 2*h, True, color=self.bgcolor)
    elif u'filled' in style:
        self.ellipse(x-s, y-s, 2*s, 2*s, True, color=color)
    elif u'cross' in style:
        self.line(x, y-s, x, y+s, color=color)
        self.line(x-s, y, x+s, y, color=color)
    else:
        # Same bug fix as above: `style`, not `self.style`.
        raise osexception(u'Unknown style: %s' % style)
def circle(self, x, y, r, fill=False, color=None, penwidth=None):

    """
    desc:
        Draws a circle, implemented as an ellipse whose bounding box is the
        square of side 2*r centered on (x, y).

    arguments:
        x:
            desc: The center X coordinate of the circle.
            type: int
        y:
            desc: The center Y coordinate of the circle.
            type: int
        r:
            desc: The radius of the circle.
            type: int

    keywords:
        fill:
            desc: "%arg_fill"
            type: bool
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        penwidth:
            desc: "%arg_penwidth"
            type: int

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.circle(100, 100, 50, fill=True, color='red')
    """

    diameter = 2 * r
    self.ellipse(x - r, y - r, diameter, diameter, fill=fill, color=color,
        penwidth=penwidth)
def line(self, sx, sy, ex, ey, color=None, penwidth=None):

    """
    desc:
        Draws a line.

    arguments:
        sx:
            desc: The left X coordinate.
            type: int
        sy:
            desc: The top Y coordinate.
            type: int
        ex:
            desc: The right X coordinate.
            type: int
        ey:
            desc: The bottom Y coordinate.
            type: int

    keywords:
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        penwidth:
            desc: "%arg_penwidth"
            type: int

    Example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        w = self.get('width')
        h = self.get('height')
        my_canvas.line(0, 0, w, h)
    """

    # Abstract: actual drawing is implemented by each back-end subclass.
    raise NotImplementedError()
def arrow(self, sx, sy, ex, ey, arrow_size=5, color=None, penwidth=None):

    """
    desc:
        Draws an arrow. An arrow is a line, with an arrowhead at (ex, ey).
        The angle between the arrowhead lines and the arrow line is 45
        degrees.

    arguments:
        sx:
            desc: The left X coordinate.
            type: int
        sy:
            desc: The top Y coordinate.
            type: int
        ex:
            desc: The right X coordinate.
            type: int
        ey:
            desc: The bottom Y coordinate.
            type: int

    keywords:
        arrow_size:
            desc: The length of the arrow-head lines in pixels.
            type: int
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        penwidth:
            desc: "%arg_penwidth"
            type: int

    Example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        w = self.get('width')/2
        h = self.get('height')/2
        my_canvas.arrow(0, 0, w, h, arrow_size=10)
    """

    # The shaft.
    self.line(sx, sy, ex, ey, color=color, penwidth=penwidth)
    # The two head lines sit at +/-45 degrees from the (reversed) shaft
    # direction, i.e. at 135 and 225 degrees from the shaft angle.
    angle = math.atan2(ey - sy, ex - sx)
    for head_angle in (135, 225):
        hx = ex + arrow_size * math.cos(angle + math.radians(head_angle))
        hy = ey + arrow_size * math.sin(angle + math.radians(head_angle))
        self.line(hx, hy, ex, ey, color=color, penwidth=penwidth)
def rect(self, x, y, w, h, fill=False, color=None, penwidth=None):

    """
    desc:
        Draws a rectangle.

    arguments:
        x:
            desc: The left X coordinate.
            type: int
        y:
            desc: The top Y coordinate.
            type: int
        w:
            desc: The width.
            type: int
        h:
            desc: The height.
            type: int

    keywords:
        fill:
            desc: "%arg_fill"
            type: bool
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        penwidth:
            desc: "%arg_penwidth"
            type: int

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        w = self.get('width')-10
        h = self.get('height')-10
        my_canvas.rect(10, 10, w, h, fill=True)
    """

    # Abstract: actual drawing is implemented by each back-end subclass.
    raise NotImplementedError()
def ellipse(self, x, y, w, h, fill=False, color=None, penwidth=None):

    """
    desc:
        Draws an ellipse.

    arguments:
        x:
            desc: The left X coordinate.
            type: int
        y:
            desc: The top Y coordinate.
            type: int
        w:
            desc: The width.
            type: int
        h:
            desc: The height.
            type: int

    keywords:
        fill:
            desc: "%arg_fill"
            type: bool
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        penwidth:
            desc: "%arg_penwidth"
            type: int

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        w = self.get('width')-10
        h = self.get('height')-10
        my_canvas.ellipse(10, 10, w, h, fill=True)
    """

    # Abstract: actual drawing is implemented by each back-end subclass.
    # Note that circle() and fixdot() are implemented on top of this method.
    raise NotImplementedError()
def polygon(self, vertices, fill=False, color=None, penwidth=None):

    """
    desc:
        Draws a polygon that defined by a list of vertices. I.e. a shape of
        points connected by lines.

    arguments:
        vertices:
            desc: A list of tuples, where each tuple corresponds to a
                  vertex. For example, [(100,100), (200,100), (100,200)]
                  will draw a triangle.
            type: list

    keywords:
        fill:
            desc: "%arg_fill"
            type: bool
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        penwidth:
            desc: "%arg_penwidth"
            type: int

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        n1 = 0,0
        n2 = 100, 100
        n3 = 0, 100
        my_canvas.polygon([n1, n2, n3])
    """

    # Abstract: actual drawing is implemented by each back-end subclass.
    raise NotImplementedError()
def text_size(self, text, max_width=None, bidi=None, html=True):

    """
    desc:
        Determines the size of a text string in pixels, without actually
        drawing anything (a dry run of the HTML renderer).

    arguments:
        text:
            desc: A string of text.
            type: [str, unicode]

    keywords:
        max_width:
            desc: "%arg_max_width"
            type: [int, NoneType]
        bidi:
            desc: "%arg_bidi"
            type: [bool, NoneType]
        html:
            desc: "%arg_html"
            type: bool

    returns:
        desc: A (width, height) tuple containing the dimensions of the
              text string.
        type: tuple

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        w, h = my_canvas.text_size('Some text')
    """

    self.html.reset()
    # dry_run=True makes the renderer measure without drawing.
    w, h = self.html.render(text, 0, 0, self, max_width=max_width,
        html=html, bidi=bidi, dry_run=True)
    return w, h
def text(self, text, center=True, x=None, y=None, max_width=None,
    color=None, bidi=None, html=True):

    """
    desc:
        Draws text, with optional HTML markup and bidirectional-text
        support, via the shared HTML renderer.

    arguments:
        text:
            desc: A string of text.
            type: [str, unicode]

    keywords:
        center:
            desc: A bool indicating whether the coordinates reflect the
                  center (True) or top-left (False) of the text.
            type: bool
        x:
            desc: The X coordinate, or None to draw horizontally centered
                  text.
            type: [int, NoneType]
        y:
            desc: The Y coordinate, or None to draw vertically centered
                  text.
            type: [int, NoneType]
        max_width:
            desc: "%arg_max_width"
            type: [int, NoneType]
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
        bidi:
            desc: "%arg_bidi"
            type: [bool, NoneType]
        html:
            desc: "%arg_html"
            type: bool

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.text('Some text with <b>boldface</b> and <i>italics</i>')
    """

    # Resolve unspecified keywords to the canvas defaults.
    color = self.fgcolor if color is None else self.color(color)
    if bidi is None:
        bidi = self.bidi
    if x is None:
        x = self.xcenter()
    if y is None:
        y = self.ycenter()
    self.html.reset()
    self.html.render(text, x, y, self, max_width=max_width, center=center,
        color=color, html=html, bidi=bidi)
def _text(self, text, x, y):

    """
    desc:
        A simple function that renders a string of text with the canvas
        default settings. This function needs to be re-implemented in
        each back-ends, as it handles actual text rendering.

    visible: False

    arguments:
        text:
            desc: A string of text.
            type: [str, unicode]
        x:
            desc: The X coordinate.
            type: int
        y:
            desc: The Y coordinate.
            type: int
    """

    # Abstract: low-level primitive used by the HTML renderer; each
    # back-end provides the real implementation.
    raise NotImplementedError()
def _text_size(self, text):

    """
    desc:
        Determines the size of a string of text for the default font. This
        function is for internal use, and should be re-implemented for each
        back-end.

    visible: False

    arguments:
        text:
            desc: A string of text.
            type: [str, unicode]

    returns:
        desc: A (width, height) tuple.
        type: tuple
    """

    # Abstract: back-end-specific measurement primitive.
    raise NotImplementedError()
def textline(self, text, line, color=None):

    """
    desc: |
        A convenience function that draws a line of text based on a line
        number. The text strings are centered on the X-axis and vertically
        spaced with 1.5 times the line height as determined by text_size().

        __Note:__

        This function has been deprecated.

    visible: False

    arguments:
        text:
            desc: A string of text.
            type: [str, unicode]
        line:
            desc: A line number, where 0 is the center and > 0 is below
                  the center.
            type: int

    keywords:
        color:
            desc: "%arg_fgcolor"
            type: [str, unicode, NoneType]
    """

    # Vertical offset is 1.5 line-heights per line number.
    _, height = self.text_size(text)
    self.text(text, True, self.xcenter(),
        self.ycenter() + 1.5 * line * height, color=color)
def image(self, fname, center=True, x=None, y=None, scale=None):

    """
    desc:
        Draws an image from file. This function does not look in the file
        pool, but takes an absolute path.

    arguments:
        fname:
            desc: The filename of the image. If this is a `str` it is
                  assumed to be in utf-8 encoding.
            type: [str, unicode]

    keywords:
        center:
            desc: A bool indicating whether coordinates indicate the
                  center (True) or top-left (False).
            type: bool
        x:
            desc: The X coordinate, or `None` to draw a horizontally
                  centered image.
            type: [int, NoneType]
        y:
            desc: The Y coordinate, or `None` to draw a vertically
                  centered image.
            type: [int, NoneType]
        scale:
            desc: The scaling factor of the image. `None` or 1 indicate
                  the original size. 2.0 indicates a 200% zoom, etc.
            type: [float, int, NoneType]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        # Determine the absolute path:
        path = exp.get_file(u'image_in_pool.png')
        my_canvas.image(path)
    """

    # Abstract: image loading/blitting is implemented by each back-end.
    raise NotImplementedError()
def gabor(self, x, y, orient, freq, env=u'gaussian', size=96, stdev=12,
    phase=0, col1=u'white', col2=u'black', bgmode=u'avg'):

    """
    desc: |
        Draws a Gabor patch. Note: The exact rendering of the Gabor patch
        depends on the back-end.

    arguments:
        x:
            desc: The center X coordinate.
            type: int
        y:
            desc: The center Y coordinate.
            type: int
        orient:
            desc: Orientation in degrees [0 .. 360].
            type: [float, int]
        freq:
            desc: Frequency in cycles/px of the sinusoid.
            type: [float, int]

    keywords:
        env:
            desc: The envelope that determines the shape of the patch. Can
                  be "gaussian", "linear", "circular", or "rectangular".
            type: [str, unicode]
        size:
            desc: A size in pixels.
            type: [float, int]
        stdev:
            desc: Standard deviation in pixels of the gaussian. Only
                  applicable to gaussian envelopes.
            type: [float, int]
        phase:
            desc: Phase of the sinusoid [0.0 .. 1.0].
            type: [float, int]
        col1:
            desc: A color for the peaks.
            type: [str, unicode]
        col2:
            desc: |
                A color for the troughs. Note: The psycho back-end
                ignores this parameter and always uses the inverse of
                `col1` for the throughs.
            type: [str, unicode]
        bgmode:
            desc: "%arg_bgmode"
            type: [str, unicode]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.gabor(100, 100, 45, .05)
    """

    # Abstract: back-ends typically build the patch with the module-level
    # _gabor() helper and then blit it.
    raise NotImplementedError()
def noise_patch(self, x, y, env=u'gaussian', size=96, stdev=12,
    col1=u'white', col2=u'black', bgmode=u'avg'):

    """
    desc:
        Draws a patch of noise, with an envelope. The exact rendering of the
        noise patch depends on the back-end.

    arguments:
        x:
            desc: The center X coordinate.
            type: int
        y:
            desc: The center Y coordinate.
            type: int

    keywords:
        env:
            desc: The envelope that determines the shape of the patch. Can
                  be "gaussian", "linear", "circular", or "rectangular".
            type: [str, unicode]
        size:
            desc: A size in pixels.
            type: [float, int]
        stdev:
            desc: Standard deviation in pixels of the gaussian. Only
                  applicable to gaussian envelopes.
            type: [float, int]
        col1:
            desc: The first color.
            type: [str, unicode]
        col2:
            desc: |
                The second color. Note: The psycho back-end ignores this
                parameter and always uses the inverse of `col1`.
            type: [str, unicode]
        bgmode:
            desc: "%arg_bgmode"
            type: [str, unicode]

    example: |
        from openexp.canvas import canvas
        my_canvas = canvas(exp)
        my_canvas.noise_patch(100, 100, env='circular')
    """

    # Abstract: back-ends typically build the patch with the module-level
    # _noise_patch() helper and then blit it.
    raise NotImplementedError()
def init_display(experiment):

    """
    desc:
        Initializes the display before the experiment begins.

    arguments:
        experiment:
            desc: An experiment object.
            type: experiment
    """

    # Module-level back-end hook (note: no self); each back-end module
    # provides its own implementation.
    raise NotImplementedError()
def close_display(experiment):

    """
    desc:
        Closes the display after the experiment is finished.

    arguments:
        experiment:
            desc: An experiment object.
            type: experiment
    """

    # Module-level back-end hook (note: no self); counterpart of
    # init_display().
    raise NotImplementedError()
# Translation mapping from envelope names to the canonical single-letter
# codes used by _gabor()/_noise_patch():
#   "c" = circular, "g" = gaussian, "r" = rectangular, "l" = linear.
# None (no envelope specified) defaults to gaussian.
# Bug fix: "rect" and "square" previously mapped to u"g" (gaussian),
# although they are rectangular synonyms; they now map to u"r". Duplicate
# assignments of u"g" and u"l" were also dropped.
env_synonyms = {
    u"c": u"c",
    u"circular": u"c",
    u"round": u"c",
    u"g": u"g",
    u"gaussian": u"g",
    u"gauss": u"g",
    u"normal": u"g",
    u"r": u"r",
    u"rectangular": u"r",
    u"rectangle": u"r",
    u"rect": u"r",
    u"square": u"r",
    None: u"g",
    u"l": u"l",
    u"linear": u"l",
    u"lin": u"l",
    u"ln": u"l",
}

# Cache of generated patch surfaces, keyed by the full parameter set, so
# identical patches are only rendered once (see _gabor/_noise_patch).
canvas_cache = {}
def _gabor(orient, freq, env=u"gaussian", size=96, stdev=12, phase=0,
    col1=u"white", col2=u"black", bgmode=u"avg"):

    """
    desc:
        Returns a pygame surface containing a Gabor patch. For arguments,
        see [canvas.gabor].
    """

    env = _match_env(env)
    # Generating a Gabor patch takes quite some time, so keep
    # a cache of previously generated Gabor patches to speed up
    # the process.
    global canvas_cache
    key = u"gabor_%s_%s_%s_%s_%s_%s_%s_%s_%s" % (orient, freq, env, size,
        stdev, phase, col1, col2, bgmode)
    if key in canvas_cache:
        return canvas_cache[key]
    # Create a surface
    surface = pygame.Surface( (size, size) )
    # PixelArray gives fast per-pixel access; fall back to the slower
    # Surface.set_at() if it cannot be created.
    try:
        px = pygame.PixelArray(surface)
    except:
        px = None
    # Convert the orientation to radians
    orient = math.radians(orient)
    col1 = _color(col1)
    col2 = _color(col2)
    # rx and ry reflect the real coordinates in the
    # target image
    for rx in range(size):
        for ry in range(size):
            # Distance from the center
            dx = rx - 0.5 * size
            dy = ry - 0.5 * size
            # Get the coordinates (x, y) in the unrotated
            # Gabor patch
            t = math.atan2(dy, dx) + orient
            r = math.sqrt(dx ** 2 + dy ** 2)
            ux = r * math.cos(t)
            uy = r * math.sin(t)
            # Get the amplitude without the envelope (0 .. 1);
            # phase is expressed in cycles.
            amp = 0.5 + 0.5 * math.cos(2.0 * math.pi * (ux * freq + phase))
            # The envelope adjustment
            if env == "g":
                f = math.exp(-0.5 * (ux / stdev) ** 2 - 0.5 * (uy / stdev) ** 2)
            elif env == "l":
                f = max(0, (0.5 * size - r) / (0.5 * size))
            elif env == "c":
                if (r > 0.5 * size):
                    f = 0.0
                else:
                    f = 1.0
            else:
                # Rectangular envelope: no attenuation.
                f = 1.0
            # Apply the envelope; "avg" blends attenuated pixels towards
            # mid-gray rather than towards col2.
            if bgmode == u"avg":
                amp = amp * f + 0.5 * (1.0 - f)
            else:
                amp = amp * f
            # NOTE: r is reused here as the red channel (it held the
            # radius above); the radius is no longer needed at this point.
            r = col1.r * amp + col2.r * (1.0 - amp)
            g = col1.g * amp + col2.g * (1.0 - amp)
            b = col1.b * amp + col2.b * (1.0 - amp)
            if px is None:
                surface.set_at((rx, ry), (round(r), round(g), round(b)))
            else:
                px[rx][ry] = round(r), round(g), round(b)
    canvas_cache[key] = surface
    # Drop the PixelArray reference so the surface is unlocked before use.
    del px
    return surface
def _noise_patch(env=u"gaussian", size=96, stdev=12, col1=u"white",
    col2=u"black", bgmode=u"avg"):

    """
    desc:
        Returns a pygame surface containing a noise patch. For arguments,
        see [canvas.noise_patch].
    """

    env = _match_env(env)
    # Generating a noise patch takes quite some time, so keep
    # a cache of previously generated noise patches to speed up
    # the process.
    # NOTE(review): because the cache key ignores the random seed, a given
    # parameter combination always returns the same cached noise pattern.
    global canvas_cache
    key = u"noise_%s_%s_%s_%s_%s_%s" % (env, size, stdev, col1, col2, bgmode)
    if key in canvas_cache:
        return canvas_cache[key]
    # Create a surface
    surface = pygame.Surface( (size, size) )
    # PixelArray gives fast per-pixel access; fall back to the slower
    # Surface.set_at() if it cannot be created.
    try:
        px = pygame.PixelArray(surface)
    except:
        px = None
    col1 = _color(col1)
    col2 = _color(col2)
    # rx and ry reflect the real coordinates in the
    # target image
    for rx in range(size):
        for ry in range(size):
            # Distance from the center
            ux = rx - 0.5 * size
            uy = ry - 0.5 * size
            r = math.sqrt(ux ** 2 + uy ** 2)
            # Get the amplitude without the envelope (0 .. 1)
            amp = random.random()
            # The envelope adjustment
            if env == u"g":
                f = math.exp(-0.5 * (ux / stdev) ** 2 - 0.5 * (uy / stdev) ** 2)
            elif env == u"l":
                f = max(0, (0.5 * size - r) / (0.5 * size))
            elif env == u"c":
                if (r > 0.5 * size):
                    f = 0.0
                else:
                    f = 1.0
            else:
                # Rectangular envelope: no attenuation.
                f = 1.0
            # Apply the envelope; "avg" blends attenuated pixels towards
            # mid-gray rather than towards col2.
            if bgmode == u"avg":
                amp = amp * f + 0.5 * (1.0 - f)
            else:
                amp = amp * f
            # NOTE: r is reused here as the red channel (it held the
            # radius above); the radius is no longer needed at this point.
            r = col1.r * amp + col2.r * (1.0 - amp)
            g = col1.g * amp + col2.g * (1.0 - amp)
            b = col1.b * amp + col2.b * (1.0 - amp)
            if px is None:
                surface.set_at((rx, ry), (round(r), round(g), round(b)))
            else:
                px[rx][ry] = round(r), round(g), round(b)
    canvas_cache[key] = surface
    # Drop the PixelArray reference so the surface is unlocked before use.
    del px
    return surface
def _match_env(env):

    """
    desc:
        Translation between various envelope names.

    arguments:
        env:
            desc: An envelope name.
            type: [str, unicode]

    returns:
        desc: A standard envelope name ("c", "g", "r" or "l")
        type: unicode
    """

    global env_synonyms
    # Look the synonym up directly; an unknown name surfaces as a
    # user-facing osexception rather than a bare KeyError.
    try:
        return env_synonyms[env]
    except KeyError:
        raise osexception(u"'%s' is not a valid envelope" % env)
def _color(color):

    """
    desc:
        Creates a PyGame color object from a flexible color specification:
        a color name or HTML string, a luminance value (int 0-255 or float
        0.0-1.0), an (r,g,b) or (r,g,b,a) tuple, or an existing
        pygame.Color.

    returns:
        A pygame color object.
    """

    # NOTE: the order of these isinstance checks is load-bearing under
    # Python 2, where unicode and str are distinct types.
    if isinstance(color, unicode):
        return pygame.Color(str(color))
    if isinstance(color, str):
        return pygame.Color(color)
    if isinstance(color, int):
        # A single int is treated as a grayscale luminance (fully opaque).
        return pygame.Color(color, color, color, 255)
    if isinstance(color, float):
        # A float is a luminance in the 0.0 - 1.0 range.
        i = int(255 * color)
        return pygame.Color(i, i, i, 255)
    if isinstance(color, tuple):
        if len(color) == 3:
            return pygame.Color(color[0], color[1], color[2], 255)
        if len(color) > 3:
            return pygame.Color(color[0], color[1], color[2], color[3])
        # Tuples shorter than 3 elements fall through to the error below.
        raise osexception(u'Unknown color: %s' % color)
    if isinstance(color, pygame.Color):
        return color
    raise osexception(u'Unknown color: %s' % color)
|
SCgeeker/OpenSesame
|
openexp/_canvas/canvas.py
|
Python
|
gpl-3.0
| 31,992
|
[
"Gaussian"
] |
024f07d7432568e80927d6c1a32b1f0fcc1436085c932d6aad8a0c4f994a85d9
|
""" Job accounting type.
Filled by the JobWrapper (by the jobs) and by the agent "WorloadManagement/StalledJobAgent"
"""
import DIRAC
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
class Job(BaseAccountingType):

    """Accounting record for a single finished job: key fields identify
    who/where/what, accounting fields hold the measured resource usage."""

    def __init__(self):
        super(Job, self).__init__()
        # Dimensions by which job records can be grouped and selected.
        self.definitionKeyFields = [
            ("User", "VARCHAR(64)"),
            ("UserGroup", "VARCHAR(32)"),
            ("JobGroup", "VARCHAR(64)"),
            ("JobType", "VARCHAR(32)"),
            ("JobClass", "VARCHAR(32)"),
            ("ProcessingType", "VARCHAR(256)"),
            ("Site", "VARCHAR(64)"),
            ("FinalMajorStatus", "VARCHAR(32)"),
            ("FinalMinorStatus", "VARCHAR(256)"),
        ]
        # Measured quantities that are summed/averaged per bucket.
        self.definitionAccountingFields = [
            ("CPUTime", "INT UNSIGNED"),  # utime + stime + cutime + cstime
            ("NormCPUTime", "INT UNSIGNED"),  # CPUTime * CPUNormalizationFactor
            ("ExecTime", "INT UNSIGNED"),  # elapsed_time (wall time) * numberOfProcessors
            ("InputDataSize", "BIGINT UNSIGNED"),
            ("OutputDataSize", "BIGINT UNSIGNED"),
            ("InputDataFiles", "INT UNSIGNED"),
            ("OutputDataFiles", "INT UNSIGNED"),
            ("DiskSpace", "BIGINT UNSIGNED"),
            ("InputSandBoxSize", "BIGINT UNSIGNED"),
            ("OutputSandBoxSize", "BIGINT UNSIGNED"),
            ("ProcessedEvents", "INT UNSIGNED"),  # unused (normally not filled)
        ]
        # Bucket granularity as (max record age in s, bucket length in s):
        # finer time resolution for recent records, coarser for old ones.
        self.bucketsLength = [
            (86400 * 8, 3600),  # <1w+1d = 1h
            (86400 * 35, 3600 * 4),  # <35d = 4h
            (86400 * 30 * 6, 86400),  # <6m = 1d
            (86400 * 365, 86400 * 2),  # <1y = 2d
            (86400 * 600, 604800),  # >1y = 1w
        ]
        self.checkType()
        # Fill the site
        self.setValueByKey("Site", DIRAC.siteName())

    def checkRecord(self):
        """Sanity-check the record before commit: reject implausibly
        large ExecTime values. Returns an S_OK/S_ERROR structure."""
        result = self.getValue("ExecTime")
        if not result["OK"]:
            return result
        execTime = result["Value"]
        # NOTE(review): 33350400 s is ~386 days (not exactly one year), and
        # the value checked is ExecTime (wall time * processors) although
        # the error message talks about cpu time — confirm intent upstream.
        if execTime > 33350400:  # 1 year
            return DIRAC.S_ERROR("OOps. More than 1 year of cpu time smells fishy!")
        return DIRAC.S_OK()
|
DIRACGrid/DIRAC
|
src/DIRAC/AccountingSystem/Client/Types/Job.py
|
Python
|
gpl-3.0
| 2,185
|
[
"DIRAC"
] |
2b9d76ce8f5eb5fb8338a3692f90d6c7e28cf3e1fe1072a0c67c3cd0f66f1253
|
# Storage filtering classes
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
# pylint: disable-msg=E0611
from gi.repository import Gtk
from collections import namedtuple
import itertools
from blivet import arch
from blivet.devices import DASDDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, MDRaidArrayDevice, ZFCPDiskDevice
from blivet.fcoe import has_fcoe
from pyanaconda.flags import flags
from pyanaconda.i18n import _, N_, P_
from pyanaconda.ui.lib.disks import getDisks, isLocalDisk, size_str
from pyanaconda.ui.gui.utils import enlightbox
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.advstorage.fcoe import FCoEDialog
from pyanaconda.ui.gui.spokes.advstorage.iscsi import ISCSIDialog
from pyanaconda.ui.gui.spokes.advstorage.zfcp import ZFCPDialog
from pyanaconda.ui.gui.spokes.lib.cart import SelectedDisksDialog
from pyanaconda.ui.gui.categories.system import SystemCategory
__all__ = ["FilterSpoke"]
# Column layout of the shared disk store; one DiskStoreRow per disk. The
# field order must match the columns of the Gtk list store it mirrors.
_DISK_STORE_FIELDS = [
    "visible", "selected", "mutable",
    "name", "type", "model", "capacity",
    "vendor", "interconnect", "serial",
    "wwid", "paths", "port", "target",
    "lun", "ccw", "wwpn",
]
DiskStoreRow = namedtuple("DiskStoreRow", _DISK_STORE_FIELDS)
class FilterPage(object):
    """A FilterPage is the logic behind one of the notebook tabs on the filter
       UI spoke. Each page has its own specific filtered model overlaid on top
       of a common model that holds all non-advanced disks.

       A Page is created once, when the filter spoke is initialized. It is
       setup multiple times - each time the spoke is revisited. When the Page
       is setup, it is given a complete view of all disks that belong on this
       Page. This is because certain pages may require populating a combo with
       all vendor names, or other similar tasks.

       This class is just a base class. One subclass should be created for each
       more specialized type of page. Only one instance of each subclass should
       ever be created.
    """
    def __init__(self, storage, builder):
        """Create a new FilterPage instance.

           Instance attributes:

           builder      -- A reference to the Gtk.Builder instance containing
                           this page's UI elements.
           filterActive -- Whether the user has chosen to filter results down
                           on this page. If set, visible_func should take the
                           filter UI elements into account.
           storage      -- An instance of a blivet object.
        """
        self.builder = builder
        self.storage = storage
        self.model = None
        self.filterActive = False

    def ismember(self, device):
        """Does device belong on this page? Only classifies the device kind;
           filtering settings are handled separately by visible_func.
        """
        return True

    def setup(self, store, selectedNames, disks):
        """Prepare the page's UI before display. Called on every revisit of
           the filter spoke with the master store, the names of disks the
           user already selected, and the disks this page owns (per
           ismember). Subclasses add rows to the store and populate combos.
        """
        pass

    def clear(self):
        """Reset this page's filter widgets to their defaults (the Clear
           button handler). Subclasses override as needed.
        """
        pass

    def visible_func(self, model, itr, *args):
        """Decide, per store row, whether it is shown on this page, taking
           filterActive and any global flags into account. Returns a bool.
        """
        return True

    def setupCombo(self, combo, items):
        """Fill a GtkComboBoxText with items (sorted), clearing it first so
           repeated calls are safe. The first entry becomes active.
        """
        combo.remove_all()
        for entry in sorted(items):
            combo.append_text(entry)
        if items:
            combo.set_active(0)

    def _long_identifier(self, disk):
        # For iSCSI devices, we want the long ip-address:port-iscsi-tgtname-lun-XX
        # identifier, but blivet doesn't expose that in any useful way and I don't
        # want to go asking udev. Instead, we dig around in the deviceLinks and
        # default to the name if we can't figure anything else out.
        for link in disk.deviceLinks:
            if "by-path" in link:
                return link[link.rindex("/") + 1:]
        return disk.name
class SearchPage(FilterPage):

    # Free-form search tab: shows ALL disks (no ismember restriction) and
    # filters them by port/target/LUN or by WWID.

    def __init__(self, storage, builder):
        FilterPage.__init__(self, storage, builder)
        self.model = self.builder.get_object("searchModel")
        self.model.set_visible_func(self.visible_func)
        self._lunEntry = self.builder.get_object("searchLUNEntry")
        self._wwidEntry = self.builder.get_object("searchWWIDEntry")
        self._portCombo = self.builder.get_object("searchPortCombo")
        self._targetEntry = self.builder.get_object("searchTargetEntry")

    def setup(self, store, selectedNames, disks):
        # Reset the search-type combo and repopulate the port combo from the
        # iSCSI nodes of the disks currently on this page.
        self._combo = self.builder.get_object("searchTypeCombo")
        self._combo.set_active(0)
        self._combo.emit("changed")
        ports = []
        for disk in disks:
            if hasattr(disk, "node"):
                ports.append(str(disk.node.port))
        self.setupCombo(self.builder.get_object("searchPortCombo"), ports)

    def clear(self):
        self._lunEntry.set_text("")
        self._portCombo.set_active(0)
        self._targetEntry.set_text("")
        self._wwidEntry.set_text("")

    def _port_equal(self, device):
        # Devices without an iSCSI node, or with no port selected, always match.
        active = self._portCombo.get_active_text()
        if active and hasattr(device, "node"):
            return device.node.port == active
        else:
            return True

    def _target_equal(self, device):
        # NOTE(review): matches the entered target substring against the
        # device's *initiator* attribute — confirm this is intentional.
        active = self._targetEntry.get_text().strip()
        if active:
            return active in getattr(device, "initiator", "")
        else:
            return True

    def _lun_equal(self, device):
        # NOTE(review): for iSCSI nodes this compares against node.tpgt
        # (target portal group tag), not a LUN — confirm upstream.
        active = self._lunEntry.get_text().strip()
        if active and hasattr(device, "node"):
            try:
                return int(active) == device.node.tpgt
            except ValueError:
                # Non-numeric input never filters anything out here.
                return True
        elif active and hasattr(device, "fcp_lun"):
            return active in device.fcp_lun
        else:
            return True

    def _filter_func(self, device):
        # Combo index selects the filter mode: 0 = no filter,
        # 1 = port/target/LUN, 2 = WWID substring.
        if not self.filterActive:
            return True
        filterBy = self._combo.get_active()
        if filterBy == 0:
            return True
        elif filterBy == 1:
            return self._port_equal(device) and self._target_equal(device) and self._lun_equal(device)
        elif filterBy == 2:
            # Falls back to the long by-path identifier for devices
            # without a wwid attribute.
            return self._wwidEntry.get_text() in getattr(device, "wwid", self._long_identifier(device))

    def visible_func(self, model, itr, *args):
        # The search page does not restrict by device kind, so no ismember
        # check here — only the active filter applies.
        obj = DiskStoreRow._make(model[itr])
        device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
        return self._filter_func(device)
class MultipathPage(FilterPage):

    # Tab for multipath devices; filterable by vendor, interconnect or WWID.

    def __init__(self, storage, builder):
        FilterPage.__init__(self, storage, builder)
        self.model = self.builder.get_object("multipathModel")
        self.model.set_visible_func(self.visible_func)
        self._icCombo = self.builder.get_object("multipathInterconnectCombo")
        self._vendorCombo = self.builder.get_object("multipathVendorCombo")
        self._wwidEntry = self.builder.get_object("multipathWWIDEntry")

    def ismember(self, device):
        return isinstance(device, MultipathDevice)

    def setup(self, store, selectedNames, disks):
        # Add one row per multipath device and collect the unique vendors
        # and interconnects for the filter combos.
        vendors = []
        interconnects = []
        for disk in disks:
            # The member paths are shown newline-separated in the row.
            paths = [d.name for d in disk.parents]
            selected = disk.name in selectedNames
            store.append([True, selected, not disk.protected,
                disk.name, "", disk.model, size_str(disk.size),
                disk.vendor, disk.bus, disk.serial,
                disk.wwid, "\n".join(paths), "", "",
                "", "", ""])
            if not disk.vendor in vendors:
                vendors.append(disk.vendor)
            if not disk.bus in interconnects:
                interconnects.append(disk.bus)
        self._combo = self.builder.get_object("multipathTypeCombo")
        self._combo.set_active(0)
        self._combo.emit("changed")
        self.setupCombo(self._vendorCombo, vendors)
        self.setupCombo(self._icCombo, interconnects)

    def clear(self):
        self._icCombo.set_active(0)
        self._vendorCombo.set_active(0)
        self._wwidEntry.set_text("")

    def _filter_func(self, device):
        # Combo index selects the filter mode: 0 = none, 1 = vendor,
        # 2 = interconnect, 3 = WWID substring.
        if not self.filterActive:
            return True
        filterBy = self._combo.get_active()
        if filterBy == 0:
            return True
        elif filterBy == 1:
            return device.vendor == self._vendorCombo.get_active_text()
        elif filterBy == 2:
            return device.bus == self._icCombo.get_active_text()
        elif filterBy == 3:
            return self._wwidEntry.get_text() in device.wwid

    def visible_func(self, model, itr, *args):
        # The whole page is hidden unless multipath support is enabled.
        if not flags.mpath:
            return False
        obj = DiskStoreRow._make(model[itr])
        device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
        return self.ismember(device) and self._filter_func(device)
class OtherPage(FilterPage):

    # Tab for "other" advanced disks (iSCSI and FCoE); filterable by vendor,
    # interconnect or by-path identifier substring.

    def __init__(self, storage, builder):
        FilterPage.__init__(self, storage, builder)
        self.model = self.builder.get_object("otherModel")
        self.model.set_visible_func(self.visible_func)
        self._icCombo = self.builder.get_object("otherInterconnectCombo")
        self._idEntry = self.builder.get_object("otherIDEntry")
        self._vendorCombo = self.builder.get_object("otherVendorCombo")

    def ismember(self, device):
        return isinstance(device, iScsiDiskDevice) or isinstance(device, FcoeDiskDevice)

    def setup(self, store, selectedNames, disks):
        # Add one row per disk and collect the unique vendors and
        # interconnects for the filter combos.
        # Cleanup vs. the previous version: the unused `paths` local was
        # dropped (this page stores "" in the paths column), and a second,
        # verbatim copy of the vendor/interconnect accumulation that
        # followed store.append() was removed — it could never add
        # anything, since the same values had just been appended above.
        vendors = []
        interconnects = []
        for disk in disks:
            selected = disk.name in selectedNames
            if not disk.vendor in vendors:
                vendors.append(disk.vendor)
            if not disk.bus in interconnects:
                interconnects.append(disk.bus)
            # iSCSI disks carry a node with port/target-portal-group info;
            # FCoE disks do not.
            if hasattr(disk, "node"):
                port = str(disk.node.port)
                lun = str(disk.node.tpgt)
            else:
                port = ""
                lun = ""
            store.append([True, selected, not disk.protected,
                disk.name, "", disk.model, size_str(disk.size),
                disk.vendor, disk.bus, disk.serial,
                self._long_identifier(disk), "", port, getattr(disk, "initiator", ""),
                lun, "", ""])
        self._combo = self.builder.get_object("otherTypeCombo")
        self._combo.set_active(0)
        self._combo.emit("changed")
        self.setupCombo(self._vendorCombo, vendors)
        self.setupCombo(self._icCombo, interconnects)

    def clear(self):
        self._icCombo.set_active(0)
        self._idEntry.set_text("")
        self._vendorCombo.set_active(0)

    def _filter_func(self, device):
        # Combo index selects the filter mode: 0 = none, 1 = vendor,
        # 2 = interconnect, 3 = by-path identifier substring.
        if not self.filterActive:
            return True
        filterBy = self._combo.get_active()
        if filterBy == 0:
            return True
        elif filterBy == 1:
            return device.vendor == self._vendorCombo.get_active_text()
        elif filterBy == 2:
            return device.bus == self._icCombo.get_active_text()
        elif filterBy == 3:
            # Match against the first by-path device link, if any.
            for link in device.deviceLinks:
                if "by-path" in link:
                    return self._idEntry.get_text().strip() in link
            return False

    def visible_func(self, model, itr, *args):
        obj = DiskStoreRow._make(model[itr])
        device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
        return self.ismember(device) and self._filter_func(device)
class RaidPage(FilterPage):
    """Filter page for MD RAID arrays that present themselves as disks."""

    def __init__(self, storage, builder):
        FilterPage.__init__(self, storage, builder)
        self.model = self.builder.get_object("raidModel")
        self.model.set_visible_func(self.visible_func)

    def ismember(self, device):
        # Only disk-like MD arrays belong on this page.
        return isinstance(device, MDRaidArrayDevice) and device.isDisk

    def visible_func(self, model, itr, *args):
        # If BIOS RAID support is globally disabled, hide everything.
        if not flags.dmraid:
            return False

        row = DiskStoreRow._make(model[itr])
        device = self.storage.devicetree.getDeviceByName(row.name, hidden=True)
        return self.ismember(device)
class ZPage(FilterPage):
    """Filter page for s390 storage devices (zFCP and DASD)."""

    def __init__(self, storage, builder):
        FilterPage.__init__(self, storage, builder)
        self.model = self.builder.get_object("zModel")
        self.model.set_visible_func(self.visible_func)

        self._ccwEntry = self.builder.get_object("zCCWEntry")
        self._wwpnEntry = self.builder.get_object("zWWPNEntry")
        self._lunEntry = self.builder.get_object("zLUNEntry")
        self._isS390 = arch.isS390()

    def clear(self):
        """Blank out all three filter entries."""
        self._lunEntry.set_text("")
        self._ccwEntry.set_text("")
        self._wwpnEntry.set_text("")

    def ismember(self, device):
        return isinstance(device, ZFCPDiskDevice) or isinstance(device, DASDDevice)

    def setup(self, store, selectedNames, disks):
        """ Set up our Z-page, but only if we're running on s390x. """
        # Guard clause: nothing to do off s390x.
        if not self._isS390:
            return

        ccws = []
        wwpns = []
        luns = []

        self._combo = self.builder.get_object("zTypeCombo")
        self._combo.set_active(0)
        self._combo.emit("changed")

        for disk in disks:
            paths = [parent.name for parent in disk.parents]
            selected = disk.name in selectedNames

            if getattr(disk, "type") == "zfcp":
                # remember to store all of the zfcp-related junk so we can
                # see it in the UI
                if disk.fcp_lun not in luns:
                    luns.append(disk.fcp_lun)
                if disk.wwpn not in wwpns:
                    wwpns.append(disk.wwpn)
                if disk.hba_id not in ccws:
                    ccws.append(disk.hba_id)

                # now add it to our store
                store.append([True, selected, not disk.protected,
                              disk.name, "", disk.model, size_str(disk.size),
                              disk.vendor, disk.bus, disk.serial, "", "\n".join(paths),
                              "", "", disk.fcp_lun, disk.hba_id, disk.wwpn])
class FilterSpoke(NormalSpoke):
    """Advanced storage spoke: lets the user search/filter and select disks
    across several tabs (search, multipath, other/iSCSI+FCoE, RAID, s390).
    """
    # Objects pulled out of the glade file; the models back the per-tab views.
    builderObjects = ["diskStore", "filterWindow",
                      "searchModel", "multipathModel", "otherModel", "raidModel", "zModel"]
    mainWidgetName = "filterWindow"
    uiFile = "spokes/filter.glade"

    category = SystemCategory

    title = N_("_INSTALLATION DESTINATION")

    def __init__(self, *args):
        NormalSpoke.__init__(self, *args)
        # apply() must run even when the user skips back out of the spoke.
        self.applyOnSkip = True

    @property
    def indirect(self):
        # This spoke is only reachable from the storage spoke, not the hub.
        return True

    def apply(self):
        """Record the selected disks (plus their ancestors) in kickstart data."""
        onlyuse = self.selected_disks[:]
        for disk in [d for d in self.storage.disks if d.name in onlyuse]:
            onlyuse.extend([d.name for d in disk.ancestors
                            if d.name not in onlyuse])

        self.data.ignoredisk.onlyuse = onlyuse
        self.data.clearpart.drives = self.selected_disks[:]

    def initialize(self):
        """One-time setup: build the per-tab pages and prune tabs/buttons
        that do not apply to this machine (s390, FCoE support)."""
        NormalSpoke.initialize(self)

        self.pages = [SearchPage(self.storage, self.builder),
                      MultipathPage(self.storage, self.builder),
                      OtherPage(self.storage, self.builder),
                      RaidPage(self.storage, self.builder),
                      ZPage(self.storage, self.builder)]

        self._notebook = self.builder.get_object("advancedNotebook")

        # The Z page is the last notebook page; drop it off s390.
        if not arch.isS390():
            self._notebook.remove_page(-1)
            self.builder.get_object("addZFCPButton").destroy()

        if not has_fcoe():
            self.builder.get_object("addFCOEButton").destroy()

        self._store = self.builder.get_object("diskStore")
        self._addDisksButton = self.builder.get_object("addDisksButton")

    def _real_ancestors(self, disk):
        # Return a list of all the ancestors of a disk, but remove the disk
        # itself from this list.
        return [d for d in disk.ancestors if d.name != disk.name]

    def refresh(self):
        """Rebuild the disk store and hand each page its member disks."""
        NormalSpoke.refresh(self)

        self.disks = getDisks(self.storage.devicetree)
        self.selected_disks = self.data.ignoredisk.onlyuse[:]

        # NOTE(review): on Python 3, map() is lazy and would be exhausted
        # after the first membership scan in _update_summary; this code
        # appears to assume Python 2 list semantics — verify.
        self.ancestors = itertools.chain(*map(self._real_ancestors, self.disks))
        self.ancestors = map(lambda d: d.name, self.ancestors)

        self._store.clear()

        allDisks = []
        multipathDisks = []
        otherDisks = []
        raidDisks = []
        zDisks = []

        # Now add all the non-local disks to the store. Everything has been set up
        # ahead of time, so there's no need to configure anything. We first make
        # these lists of disks, then call setup on each individual page. This is
        # because there could be page-specific setup to do that requires a complete
        # view of all the disks on that page.
        for disk in self.disks:
            if self.pages[1].ismember(disk):
                multipathDisks.append(disk)
            elif self.pages[2].ismember(disk):
                otherDisks.append(disk)
            elif self.pages[3].ismember(disk):
                raidDisks.append(disk)
            elif self.pages[4].ismember(disk):
                zDisks.append(disk)

            allDisks.append(disk)

        self.pages[0].setup(self._store, self.selected_disks, allDisks)
        self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
        self.pages[2].setup(self._store, self.selected_disks, otherDisks)
        self.pages[3].setup(self._store, self.selected_disks, raidDisks)
        self.pages[4].setup(self._store, self.selected_disks, zDisks)

        self._update_summary()

    def _update_summary(self):
        """Update the "%d storage devices selected" button label/visibility."""
        summaryButton = self.builder.get_object("summary_button")
        label = summaryButton.get_children()[0]

        # We need to remove ancestor devices from the count. Otherwise, we'll
        # end up in a situation where selecting one multipath device could
        # potentially show three devices selected (mpatha, sda, sdb for instance).
        count = len([disk for disk in self.selected_disks if disk not in self.ancestors])

        summary = P_("%d _storage device selected",
                     "%d _storage devices selected",
                     count) % count

        label.set_use_markup(True)
        label.set_markup("<span foreground='blue'><u>%s</u></span>" % summary)
        label.set_use_underline(True)

        summaryButton.set_visible(count > 0)
        label.set_sensitive(count > 0)

    def on_back_clicked(self, button):
        # Return straight to the storage spoke, not the hub.
        self.skipTo = "StorageSpoke"
        NormalSpoke.on_back_clicked(self, button)

    def on_summary_clicked(self, button):
        """Show the selected-disks dialog (read-only: no remove/boot controls)."""
        dialog = SelectedDisksDialog(self.data)

        # Include any disks selected in the initial storage spoke, plus any
        # selected in this filter UI.
        disks = [disk for disk in self.disks if disk.name in self.selected_disks]
        free_space = self.storage.getFreeSpace(disks=disks)

        with enlightbox(self.window, dialog.window):
            dialog.refresh(disks, free_space, showRemove=False, setBoot=False)
            dialog.run()

    def on_find_clicked(self, button):
        # Activate the current page's filter and re-run its visibility func.
        n = self._notebook.get_current_page()
        self.pages[n].filterActive = True
        self.pages[n].model.refilter()

    def on_clear_icon_clicked(self, entry, icon_pos, event):
        # Clicking the secondary (clear) icon empties the entry.
        if icon_pos == Gtk.EntryIconPosition.SECONDARY:
            entry.set_text("")

    def on_page_switched(self, notebook, newPage, newPageNum, *args):
        self.pages[newPageNum].model.refilter()
        notebook.get_nth_page(newPageNum).show_all()

    def on_row_toggled(self, button, path):
        """Toggle a disk's selected state and keep selected_disks in sync."""
        if not path:
            return

        page_index = self._notebook.get_current_page()
        filter_model = self.pages[page_index].model
        model_itr = filter_model.get_iter(path)
        # Map the filtered-view iterator back to the underlying disk store.
        itr = filter_model.convert_iter_to_child_iter(model_itr)
        self._store[itr][1] = not self._store[itr][1]

        if self._store[itr][1] and self._store[itr][3] not in self.selected_disks:
            self.selected_disks.append(self._store[itr][3])
        elif not self._store[itr][1] and self._store[itr][3] in self.selected_disks:
            self.selected_disks.remove(self._store[itr][3])

        self._update_summary()

    def on_add_iscsi_clicked(self, widget, *args):
        dialog = ISCSIDialog(self.data, self.storage)

        with enlightbox(self.window, dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    def on_add_fcoe_clicked(self, widget, *args):
        dialog = FCoEDialog(self.data, self.storage)

        with enlightbox(self.window, dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    def on_add_zfcp_clicked(self, widget, *args):
        dialog = ZFCPDialog(self.data, self.storage)

        with enlightbox(self.window, dialog.window):
            dialog.refresh()
            dialog.run()

        # We now need to refresh so any new disks picked up by adding advanced
        # storage are displayed in the UI.
        self.refresh()

    ##
    ## SEARCH TAB SIGNAL HANDLERS
    ##
    def on_search_type_changed(self, combo):
        # Each filter type has its own notebook page of input widgets;
        # index 0 is "no filter", which disables the Find button.
        ndx = combo.get_active()

        notebook = self.builder.get_object("searchTypeNotebook")
        findButton = self.builder.get_object("searchFindButton")

        findButton.set_sensitive(ndx != 0)
        notebook.set_current_page(ndx)

    ##
    ## MULTIPATH TAB SIGNAL HANDLERS
    ##
    def on_multipath_type_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("multipathTypeNotebook")
        findButton = self.builder.get_object("multipathFindButton")

        findButton.set_sensitive(ndx != 0)
        notebook.set_current_page(ndx)

    ##
    ## OTHER TAB SIGNAL HANDLERS
    ##
    def on_other_type_combo_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("otherTypeNotebook")
        findButton = self.builder.get_object("otherFindButton")

        findButton.set_sensitive(ndx != 0)
        notebook.set_current_page(ndx)

    ##
    ## Z TAB SIGNAL HANDLERS
    ##
    def on_z_type_combo_changed(self, combo):
        ndx = combo.get_active()

        notebook = self.builder.get_object("zTypeNotebook")
        findButton = self.builder.get_object("zFindButton")

        findButton.set_sensitive(ndx != 0)
        notebook.set_current_page(ndx)
|
projectatomic/anaconda
|
pyanaconda/ui/gui/spokes/filter.py
|
Python
|
gpl-2.0
| 26,603
|
[
"VisIt"
] |
9adc0948fba718e00039f061e38dc64bba303fa327c6aa1d5117de0d00c6c919
|
# -*- coding: utf-8 -*-
import moose
import re
def fixPath(path):
    """Collapse every run of consecutive '/' characters in *path* to one."""
    while '//' in path:
        path = path.replace('//', '/')
    return path
def test_path():
    """Create MOOSE elements under slash-mangled paths and verify that every
    path reported by wildcardFind normalizes to one of the expected paths.
    """
    paths = [ '/a'
            , '//a'
            , '/a/b'
            , '/a/b/'
            , '//a//b/////'
            , '/a/./b'
            , '///a/././b'
            ]
    expectedPath = set([fixPath(p) for p in paths])
    print(expectedPath)

    for p in paths:
        print("-- Creating %s" % p)
        if not moose.exists(p):
            # Element is not there yet: create it.  (The original printed a
            # misleading 'exists' message on this branch.)
            p = moose.Neutral(p)
        else:
            p = moose.element(p)
        print(p)

    foundPath = []
    for p in moose.wildcardFind('/##'):
        if "/a" in p.path:
            foundPath.append(p.path)

    testFailed = False
    for f in foundPath:
        # Strip array-index suffixes such as '[0]' before comparison.
        f = re.sub(r'\[\d+\]', '', f)
        if f not in expectedPath:
            testFailed = True
    assert not testFailed, "Test failed"
def main():
    # Entry point: run the MOOSE path-normalization regression test.
    test_path()
if __name__ == '__main__':
main()
|
dilawar/moose-core
|
tests/core/test_moose_paths.py
|
Python
|
gpl-3.0
| 986
|
[
"MOOSE"
] |
bac4beef5ffcd33255f02564e3adc2cc001ac9258ec93e735d9ae498521a4313
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.base.base_classes import NamedSimulationObject
class NEURONObject(NamedSimulationObject):
    """ Base class for objects in a NEURON simulation.
        This includes cells, stimulations, recordables, templates, ....
    """

    def build_hoc(self, hocfile_obj):
        # Abstract: subclasses emit their HOC code into *hocfile_obj*.
        raise NotImplementedError()

    def build_mod(self, modfile_set):
        # Abstract: subclasses contribute NMODL mechanism files to *modfile_set*.
        raise NotImplementedError()

    def get_recordable(self, *args, **kwargs):
        # Abstract: subclasses return a recordable for this simulation object.
        raise NotImplementedError()
|
mikehulluk/morphforge
|
src/morphforge/simulation/neuron/objects/neuronobject.py
|
Python
|
bsd-2-clause
| 2,027
|
[
"NEURON"
] |
a9e79dad9ac614b270df92c75e9559393ab58dd272b5888ce46b9682048a40c1
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
# Relative-path bookkeeping: an optional argv[1] names the driver directory.
DriverPath = ''
InsertPath = '/../../../'
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
    sys.path.insert(0, os.path.abspath(os.getcwd()))

# Project helper that locates the source tree relative to the object dir.
import apply_relpath
IncludePath = apply_relpath.get_topsrcdir_asrelativepathto_objdirsfnxsource()[1]
def pts(category, pyfile):
    """Announce on stdout that *pyfile* of kind *category* is being documented."""
    message = 'Auto-documenting %s file %s' % (category, pyfile)
    print(message)
# helper fn
def sphinxify_comment(text):
    """Translate in-source markup to reST: '@@' becomes '_' and '$...$'
    delimiters become :math:`...` roles.  Replacement order matters."""
    replacements = (
        ('@@', '_'),
        (' $', ' :math:`'),
        ('($', '(\ :math:`'),
        ('$ ', '` '),
        ('$.', '`.'),
        ('$,', '`,'),
        ('$)', '`\ )'),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text
# helper fn
# including the options abbr substitutions file in every SSSOUT option file slows
# compilation by a factor of ten. so, back-translate |%s__%s| into :term:`%s`
def substitute_comment(cmnt):
    """Back-translate |module__option| markers into :term:`OPTION <OPTION (MODULE)>`
    references, repeating until no marker remains."""
    marker = re.compile(r'^(.*?)[\s\(]\|(\w+)__(\w+)\|[\s\).,](.*?)$')
    m = marker.match(cmnt)
    while m:
        option = m.group(3).upper()
        cmnt = '%s :term:`%s <%s (%s)>` %s' % (
            m.group(1), option, option, m.group(2).upper(), m.group(4))
        m = marker.match(cmnt)
    return cmnt
# helper fn
def determine_options(cfilename):
    """Scan a plugin C++ source file for /*- ... -*/ documentation comments and
    the add_*() option declarations that follow them, writing reST output.

    Relies on module-level globals set by the calling loop: fmodule, fglossary
    and fabbr are open file handles; currentmodule is set here when a module
    name is matched and read by later branches.
    """
    # Patterns for module detection and /*- ... -*/ documentation comments.
    module = re.compile(r'^(.*)name\s*==\s*"(.*)"(.*?)$', re.IGNORECASE)
    modulecomment = re.compile(r'^(\s*?)\/\*-\s*MODULEDESCRIPTION\s*(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    modulecommentstart = re.compile(r'^(\s*?)\/\*-\s*MODULEDESCRIPTION\s*(.*?)(\s*?)$', re.IGNORECASE)
    subsection = re.compile(r'^(\s*?)\/\*-\s*SUBSECTION\s*(.*?)\s*-\*\/(\s*?)$', re.IGNORECASE)
    comment = re.compile(r'^(\s*?)\/\*-\s*(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    commentend = re.compile(r'^(\s*)(.*?)-\*\/(\s*?)$', re.IGNORECASE)
    commentstart = re.compile(r'^(\s*?)\/\*-\s*(.*)(\s*?)$', re.IGNORECASE)
    # Patterns for the various add_*() option-registration calls.
    kw_string_def_opt = re.compile(r'add_str\(\s*"(.*)"\s*,\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def_opt_2 = re.compile(r'add_str_i\(\s*"(.*)"\s*,\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def = re.compile(r'add_str\(\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_string_def_2 = re.compile(r'add_str_i\(\s*"(.*)"\s*,\s*"(.*)"\s*\)')
    kw_bool_def = re.compile(r'add_bool\(\s*"(.*)"\s*,\s*("?)([-\w]+)("?)\s*\)')
    kw_double_def = re.compile(r'add_double\(\s*"(.*)"\s*,\s*("?)([-/\.\w]+)("?)\s*\)')
    kw_generic_def = re.compile(r'add_(\w+)\(\s*"(\w+)"\s*,\s*("?)([-\w]+)("?)\s*\)')  # untested
    kw_complicated = re.compile(r'add\(\s*"(\w*)"\s*,\s*new\s+(\w+)\(\)\s*\)')  # untested

    fcfile = open(cfilename)
    contents = fcfile.readlines()
    fcfile.close()

    # Manual index-based scan: several branches consume extra lines by
    # advancing ii themselves, so a plain for-loop would not work.
    ii = 0
    while (ii < len(contents)):
        line = contents[ii]

        if module.match(line):
            # New module section: remember its name and start a hidden toctree.
            currentmodule = module.match(line).group(2).upper()
            fmodule.write('.. toctree::\n :hidden:\n :glob:\n\n %s__*\n\n' % (currentmodule.lower()))
        elif modulecommentstart.match(line):
            # Possibly multi-line MODULEDESCRIPTION comment: accumulate until
            # the closing -*/ is seen.
            tag = ''
            while 1:
                if (not commentend.match(line)):
                    if modulecommentstart.match(line):
                        tag += modulecommentstart.match(line).group(2)
                    else:
                        tag += ' ' + line.strip()
                    ii += 1
                    line = contents[ii]
                    continue
                else:
                    if modulecomment.match(line):
                        tag += modulecomment.match(line).group(2)
                        break
                    else:
                        tag += ' ' + commentend.match(line).group(2)
                        break
            fglossary.write('**%s**: %s\n\n' % (currentmodule, tag))
        elif subsection.match(line):
            # SUBSECTION marker: emit a reST sub-heading plus a glossary block.
            currentsubsection = subsection.match(line).group(2)
            fglossary.write('\n%s\n%s\n\n' % (currentsubsection, '^' * len(currentsubsection)))
            fglossary.write('.. glossary::\n :sorted:\n\n')
        elif commentstart.match(line):
            # Ordinary option comment: accumulate the (possibly multi-line)
            # description, then parse the add_*() call that follows it.
            tag = ''
            while 1:
                if (not commentend.match(line)):
                    if commentstart.match(line):
                        tag += commentstart.match(line).group(2)
                    else:
                        tag += ' ' + line.strip()
                    ii += 1
                    line = contents[ii]
                    continue
                else:
                    if comment.match(line):
                        tag += comment.match(line).group(2)
                        break
                    else:
                        tag += ' ' + commentend.match(line).group(2)
                        break
            tag = sphinxify_comment(tag)

            # capture option immediately after comment
            kw_name = ''
            kw_default = 'No Default'
            kw_type = ''
            kw_possible = ''
            ii += 1
            line = contents[ii]
            # Tolerate a single blank line between comment and declaration.
            if (not line or line.isspace()):
                ii += 1
                line = contents[ii]
            if kw_string_def_opt.search(line):
                m = kw_string_def_opt.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
                kw_possible = m.group(3)
            elif kw_string_def_opt_2.search(line):
                m = kw_string_def_opt_2.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
                kw_possible = m.group(3)
            elif kw_string_def.search(line):
                m = kw_string_def.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
            elif kw_string_def_2.search(line):
                m = kw_string_def_2.search(line)
                kw_name = m.group(1)
                kw_type = 'str'
                if not (not m.group(2) or m.group(2).isspace()):
                    kw_default = m.group(2)
            elif kw_bool_def.search(line):
                m = kw_bool_def.search(line)
                kw_name = m.group(1)
                kw_type = 'bool'
                if not (not m.group(3) or m.group(3).isspace()):
                    kw_default = m.group(3).lower()
                    # Normalize numeric booleans to true/false.
                    if kw_default == '1':
                        kw_default = 'true'
                    if kw_default == '0':
                        kw_default = 'false'
            elif kw_double_def.search(line):
                m = kw_double_def.search(line)
                kw_name = m.group(1)
                kw_type = 'double'
                if not (not m.group(3) or m.group(3).isspace()):
                    kw_default = m.group(3).lower()
            elif kw_generic_def.search(line):
                m = kw_generic_def.search(line)
                kw_name = m.group(2)
                kw_type = m.group(1)
                if not (not m.group(4) or m.group(4).isspace()):
                    kw_default = m.group(4).lower()
            elif kw_complicated.search(line):
                m = kw_complicated.search(line)
                kw_name = m.group(1)
                kw_type = m.group(2)
                # Map the C++ option class name to a short type label.
                if kw_type == 'ArrayType':
                    kw_type = 'array'
                elif kw_type == 'MapType':
                    kw_type = 'map'
                elif kw_type == 'PythonDataType':
                    kw_type = 'python'
                else:
                    print('ERROR: unrecognized type %s for %s' % (kw_type, kw_name))
                    sys.exit()
            # Normalize short type labels to the names used in the docs.
            if kw_type == 'str': kw_type = 'string'
            elif kw_type == 'int': kw_type = 'integer'
            elif kw_type == 'bool': kw_type = 'boolean'
            elif kw_type == 'double': pass
            elif kw_type == 'array': pass
            elif kw_type == 'map': pass
            elif kw_type == 'python': pass
            else:
                print('ERROR: unrecognized type2 %s for %s' % (kw_type, kw_name))
                sys.exit()
            #print 'kw_name = \t', kw_name
            #print 'kw_type = \t', kw_type
            #print 'kw_dflt = \t', kw_default
            #print 'kw_poss = \t', kw_possible
            #print 'kw_tagl = \t', tag
            #print '\n'

            # substitution list file
            fabbr.write('.. |%s__%s| replace:: :term:`%s <%s (%s)>`\n' %
                        (currentmodule.lower(), kw_name.lower(), kw_name.upper(), kw_name.upper(), currentmodule.upper()))

            # individual option file for plugin options. rather pointless but consistent w/regular module options
            fsssdoc = open('source/autodir_plugins/'+currentmodule.lower()+'__'+kw_name.lower()+'.rst', 'w')
            div = '"' * (14 + len(currentmodule) + 2 * len(kw_name))
            fsssdoc.write(':term:`%s <%s (%s)>`\n%s\n\n' % (kw_name.upper(), kw_name.upper(), currentmodule.upper(), div))
            fsssdoc.write(' %s\n\n' % (substitute_comment(tag)))

            fglossary.write(' %s (%s)\n %s\n\n' % (kw_name.upper(), currentmodule.upper(), tag))
            # Special-cased type rendering: booleans and convergence doubles
            # link to their reference sections; basis strings link to the
            # basis-set appendix.
            if kw_type == 'boolean':
                fglossary.write(' * **Type**: :ref:`boolean <op_c_boolean>`\n')
                fsssdoc.write(' * **Type**: :ref:`boolean <op_c_boolean>`\n')
            elif (kw_type == 'double') and ((kw_name.lower().find('conv') > -1) or (kw_name.lower().find('tol') > -1)):
                fglossary.write(' * **Type**: :ref:`conv double <op_c_conv>`\n')
                fsssdoc.write(' * **Type**: :ref:`conv double <op_c_conv>`\n')
            elif (kw_type == 'string') and ((kw_name.lower() == 'basis') or (kw_name.lower().startswith('df_basis'))):
                fglossary.write(' * **Type**: %s\n' % kw_type)
                fsssdoc.write(' * **Type**: %s\n' % kw_type)
                fglossary.write(' * **Possible Values**: :ref:`basis string <apdx:basisElement>`\n')
                fsssdoc.write(' * **Possible Values**: :ref:`basis string <apdx:basisElement>`\n')
            else:
                fglossary.write(' * **Type**: %s\n' % kw_type)
                fsssdoc.write(' * **Type**: %s\n' % kw_type)
            if not (not kw_possible or kw_possible.isspace()):
                sline = kw_possible.split()
                fglossary.write(' * **Possible Values**: %s\n' % (', '.join(sline)))
                fsssdoc.write(' * **Possible Values**: %s\n' % (', '.join(sline)))
            fglossary.write(' * **Default**: %s\n\n' % kw_default)
            fsssdoc.write(' * **Default**: %s\n\n' % kw_default)
            fsssdoc.close()

        # Stop scanning once the plugin's entry point is reached.
        if (line.find('extern "C" PsiReturnType') > -1):
            break
        ii += 1
# Objective #3
# Plugin directories in psi4/plugin/

# Master page listing every available plugin, plus the shared abbreviation file.
# NOTE(review): several reST directive-body strings below look under-indented
# (single space before ':maxdepth:' etc.); verify against the upstream file —
# the whitespace may have been collapsed in transit.
fdriver = open('source/autodoc_available_plugins.rst', 'w')
fdriver.write('\n.. index:: plugins; available\n')
fdriver.write('.. _`sec:availablePlugins`:\n\n')
fdriver.write('====================================================\n')
fdriver.write('Emerging Theoretical Methods: Plugins DFADC to RQCHF\n')
fdriver.write('====================================================\n\n')
fdriver.write('.. toctree::\n :maxdepth: 1\n\n')
fabbr = open('source/autodoc_abbr_options_plugins.rst', 'w')

# from each plugin directory ...
for pydir in glob.glob(DriverPath + '../../plugins/*'):
    dirname = os.path.split(pydir)[1]
    div = '=' * len(dirname)

    # Empty exclusion list: every plugin directory is documented.
    if dirname not in []:
        pts('plugin', dirname)
        fdriver.write(' autodir_plugins/module__%s' % (dirname))

        # Per-plugin module page.
        fmodule = open('source/autodir_plugins/module__'+dirname+'.rst', 'w')
        fmodule.write('\n.. _`sec:%s`:\n' % (dirname.lower()))
        fmodule.write('.. index:: plugin; %s\n\n' % (dirname.lower()))
        fmodule.write(':srcplugin:`' + dirname.lower() + '`\n')
        fmodule.write(div + '=============' + '\n\n')
        #fmodule.write(dirname.lower() + '\n')
        #fmodule.write(div + '\n\n')
        #fmodule.write('.. toctree::\n :hidden:\n :glob:\n\n %s__*\n\n' % (dirname.lower()))
        fmodule.write('.. toctree::\n :hidden:\n\n /autodir_plugins/glossary__%s\n\n' % (dirname.lower()))

        # Per-plugin glossary page collecting its option terms.
        fglossary = open('source/autodir_plugins/glossary__'+dirname+'.rst', 'w')
        fglossary.write('\n.. include:: /autodoc_abbr_options_c.rst\n')
        fglossary.write('.. include:: /autodoc_abbr_options_plugins.rst\n\n')
        fglossary.write('.. glossary::\n :sorted:\n\n')

        # ... include doc.rst file
        docfile = '%s/doc.rst' % (pydir)
        if os.path.isfile(docfile):
            fmodule.write('.. include:: %splugins/%s/doc.rst\n\n' % (IncludePath, dirname))

        # ... include docstrings from any *.py files
        pyfiles = glob.glob(pydir + '/*.py')
        if len(pyfiles) > 0:
            fmodule.write('Py-side Documentation\n')
            fmodule.write('---------------------\n\n')
            for pyfile in pyfiles:
                filename = os.path.split(pyfile)[1]
                basename = os.path.splitext(filename)[0]
                fmodule.write('.. automodule:: %s.%s\n' % (dirname, basename))
                fmodule.write(' :members:\n')
                fmodule.write(' :undoc-members:\n\n')

        # ... include keywords section from any *.cc files
        # todo: turn this into a fn and store in a dictionary
        cfiles = glob.glob(pydir + '/*.cc') + glob.glob(pydir + '/*.cc.in')
        if len(cfiles) > 0:
            fmodule.write('C-side Documentation\n')
            fmodule.write('--------------------\n\n')
            for cfile in cfiles:
                # determine_options writes into fmodule/fglossary/fabbr globals.
                determine_options(cfile)
            fmodule.write('.. include:: /autodir_plugins/glossary__%s.rst' % (dirname))

        fmodule.write('\n\n')
        fmodule.close()
        fglossary.write('\n\n')
        fglossary.close()
        # Terminate this plugin's toctree entry line.
        fdriver.write('\n')

fdriver.write('\n')
fdriver.close()
fabbr.write('\n')
fabbr.close()
|
CDSherrill/psi4
|
doc/sphinxman/document_plugins.py
|
Python
|
lgpl-3.0
| 15,221
|
[
"Psi4"
] |
f3a5db21290d7d57cb1839b674a9944d2a679dffe39b300850ab61950cbb4bfe
|
#!/usr/bin/env python
#import argparse
#from glob import glob
# Fix: the original file contained a stray token ('dhdhd') here, which raised
# a NameError as soon as the script was imported or run.  Removed.
from os.path import join as jp
from os.path import abspath
import os
import sys
import argparse

# Command-line interface; both arguments are required.
parser = argparse.ArgumentParser()
parser.add_argument('-s', "--samples", help="Samples.txt file with sample ID.", required=True)
parser.add_argument('-b', "--bwaindex", help="Path to bwa index file.", required=True)
args = parser.parse_args()
#args = parser.parse_args('-s samples.txt -r /mnt/lfs2/hend6746/fox_cancer/0rawdata_test -b /mnt/lfs2/hend6746/wolves/reference/canfam31/canfam31.fa'.split())

# When True, log() echoes each emitted line to stdout as well.
VERBOSE=False

#Function definitions:
def log(txt, out):
    """Append *txt* as one line to the file object *out* and flush it;
    echo to stdout when the module-level VERBOSE flag is set."""
    if VERBOSE:
        print(txt)
    out.write('%s\n' % txt)
    out.flush()
## Read in samples and put them in a list:
samples = []
for l in open(args.samples):
    if len(l) > 1:
        # Keep just the sample ID: basename minus the fastq suffix.
        samples.append(l.split('/')[-1].replace('_R1_001.fastq.gz', '').strip())

# Setup folders and paths variables:
bamFolder = abspath('02-Mapped')
variantFolder = abspath('03-Calls')
PBS_scripts = abspath('GATK_PBS_scripts')
#rawdataDir = abspath(args.rawdata)
bwaIndex = abspath(args.bwaindex)
gatkCall = 'java -jar /opt/modules/biology/gatk/3.5/bin/GenomeAnalysisTK.jar -R %s' % bwaIndex
os.system('mkdir -p %s' % bamFolder)
os.system('mkdir -p %s' % variantFolder)
os.system('mkdir -p %s' % PBS_scripts)

##### Run pipeline ###
# NOTE: the bare print statements below are Python 2 syntax; this script
# targets the Python 2 interpreter named in its shebang.
for sample in samples:
    print "Processing", sample, "....."
    # Set up files:
    logFile = jp(variantFolder, sample + '_GATK.log')
    # One PBS job script is generated per sample; commands are logged, not run.
    logCommands = open(jp(PBS_scripts, sample + '_commands.sh'), 'w')

    #Setup for qsub
    log('#!/bin/bash', logCommands)
    log('#PBS -N %s' % sample, logCommands)
    log('#PBS -j oe', logCommands)
    log('#PBS -o %s_job.log' % sample, logCommands)
    log('#PBS -m abe', logCommands)
    log('#PBS -M shendri4@gmail.com', logCommands)
    log('#PBS -q short', logCommands)
    log('#PBS -l mem=100000', logCommands)
    log(". /usr/modules/init/bash", logCommands)
    log("module load python/2.7.10", logCommands)
    log("module load grc", logCommands)

    ###########Per-Sample Variant Calling
    #HaplotypeCaller on each sample BAM file
    #(if a sample's data is spread over more than one BAM, then pass them all in together) to create single-sample gVCFs
    #not recommended for somatic (cancer) variant discovery. For that purpose, use MuTect2 instead
    cmd = ' '.join([gatkCall, ' -T HaplotypeCaller ', ' -I ' + jp(bamFolder, sample) + '.bam', ' --emitRefConfidence GVCF ', ' -o ' + jp(variantFolder, sample) + '.raw.snps.indels.g.vcf', '>>', logFile, '2>&1'])
    log(cmd, logCommands)
    #os.system(cmd)
    logCommands.close()
|
shendri4/fox_wgs
|
bam2vcf_GATK_pipeline.py
|
Python
|
apache-2.0
| 2,643
|
[
"BWA"
] |
148bb935ccbf597a52813ef5bc2483239dbec59a2fe4d51d3693ca40c438f84f
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMdanalysis(PythonPackage):
    """MDAnalysis is a Python toolkit to analyze molecular dynamics
    trajectories generated by a wide range of popular simulation
    packages including DL_Poly, CHARMM, Amber, NAMD, LAMMPS, and
    Gromacs. (See the lists of supported trajectory formats and
    topology formats.)"""

    homepage = "http://www.mdanalysis.org"
    url = "https://pypi.io/packages/source/M/MDAnalysis/MDAnalysis-0.19.2.tar.gz"

    # Known releases, newest first.
    version('1.0.0', sha256='f45a024aca45e390ff1c45ca90beb2180b78881be377e2a1aa9cd6c109bcfa81')
    version('0.20.1', sha256='d04b71b193b9716d2597ffb9938b93f43487fa535da1bb5c1f2baccf356d7df9')
    version('0.19.2', sha256='c5395bbafa5efca2e1aee4715d26129844140c47cb8301da0293106cb969de7d')
    version('0.19.1', sha256='ff1d694f8598c0833ec340de6a6adb3b5e62b92d0fa94ee6401718ba972db3cc')
    version('0.19.0', sha256='248e3b37fc6150e31c609cc18a3927c32aee37b76d29cbfedf635e7e1aa982cf')
    version('0.18.0', sha256='a08acea1755112411e7db55e3f282e164b47a59e15794b38744cce6c596f252a')
    version('0.17.0', sha256='9bd61760334698cc7b8a57ad26456451e926e9c9e66722594ad8816561348cde')
    version('0.16.2', sha256='407d9a9ff1ab8a5e47973714d06fabff220f8d08a28792dee93e88e70e995b0a')
    version('0.16.1', sha256='3dc8f5d639ab3a0d152cbd7259ae9372ec8a9bac0f8cb7d3b80ce5adc1e3ee57')
    version('0.16.0', sha256='c4824fa1fddd336daa39371436187ebb023366885fb250c2827ed7fce2546bd4')
    version('0.15.0', sha256='9088786048b47339cba1f8a586977bbb3bb04ae1bcd0462b59e45bda37e25533')

    # Optional features toggled at install time.
    variant('analysis', default=True,
            description='Enable analysis packages: matplotlib, scipy, seaborn')
    variant('amber', default=False,
            description='Support AMBER netcdf format.')

    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-cython@0.16:', type='build')
    depends_on('py-six@1.4.0:', type=('build', 'run'))
    depends_on('py-networkx@1.0:', type=('build', 'run'))
    # NOTE(review): the '@1.17.0:' constraint below looks like a typo for
    # '@0.17.0:' — no 1.17.x release is listed above, so as written this
    # dependency never applies. Verify against upstream MDAnalysis history.
    depends_on('py-gsd@1.4.0:', when='@1.17.0:', type=('build', 'run'))
    depends_on('py-mmtf-python@1.0.0:', when='@0.16.0:', type=('build', 'run'))
    depends_on('py-mock', when='@0.18.0:', type=('build', 'run'))
    depends_on('py-tqdm@4.43.0:', when='@1.0.0:', type=('build', 'run'))
    depends_on('py-joblib', when='@0.16.0:0.20.1', type=('build', 'run'))
    depends_on('py-joblib@0.12:', when='@1.0.0:', type=('build', 'run'))

    # Version-dependent minimums for the core scientific stack.
    depends_on('py-numpy@1.5.0:', when='@:0.15.0', type=('build', 'run'))
    depends_on('py-numpy@1.10.4:', when='@0.16.0:0.19.2', type=('build', 'run'))
    depends_on('py-numpy@1.13.3:', when='@0.20.1:', type=('build', 'run'))

    depends_on('py-biopython@1.59:', when='@:0.17.0', type=('build', 'run'))
    depends_on('py-biopython@1.71:', when='@0.18.0:', type=('build', 'run'))

    depends_on('py-griddataformats@0.3.2:', when='@:0.16.2', type=('build', 'run'))
    depends_on('py-griddataformats@0.4:', when='@0.17.0:', type=('build', 'run'))

    # matplotlib/scipy were gated behind +analysis in older releases and
    # became unconditional later.
    depends_on('py-matplotlib', when='@:0.15.0+analysis', type=('build', 'run'))
    depends_on('py-matplotlib@1.5.1:', when='@0.16.0:0.16.1+analysis', type=('build', 'run'))
    depends_on('py-matplotlib@1.5.1:', when='@0.16.2:', type=('build', 'run'))

    depends_on('py-scipy', when='@:0.16.1+analysis', type=('build', 'run'))
    depends_on('py-scipy', when='@0.16.2:0.17.0', type=('build', 'run'))
    depends_on('py-scipy@1.0.0:', when='@0.18.0:', type=('build', 'run'))

    depends_on('py-scikit-learn', when='@0.16.0:+analysis', type=('build', 'run'))
    depends_on('py-seaborn', when='+analysis', type=('build', 'run'))

    # AMBER NetCDF trajectory support.
    depends_on('py-netcdf4@1.0:', when='+amber', type=('build', 'run'))
    depends_on('hdf5', when='+amber', type=('run'))
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-mdanalysis/package.py
|
Python
|
lgpl-2.1
| 4,107
|
[
"Amber",
"Biopython",
"CHARMM",
"DL_POLY",
"Gromacs",
"LAMMPS",
"MDAnalysis",
"NAMD",
"NetCDF"
] |
c6daf2450c2f1cfab9a82334211b373cd7bab739dc3d6d9ed8c44488f2be7c58
|
#!/homes/dthybert/software/Python-2.7.5/python
import pysam
import scipy.stats
import sys
import argparse
class GenomeSegment:
    """Per-position read counts and fractions over a contiguous genomic
    window, with simple sliding-window smoothing and region extraction."""

    def __init__(self, size, chr, start):
        self.chr = chr
        self.start = start
        # Half-width used by smooth() when slicing around a position.
        self.windowsSmoothing = 5
        self.lstPos = [0] * size
        self.lstNbRead = [0] * size
        self.lstFraction = [0.0] * size
        self.lstNormFraction = [0.0] * size
        self.lstOtherInformation = [[]] * size
        self.smoothedNbReads = [0.0] * size
        self.smoothedFraction = [0.0] * size

    def addPosition(self, position, index):
        """Parse one tab-separated position line into the arrays at *index*.

        Expected columns: chr, pos, nbReads, fraction, normFraction, extras...
        """
        fields = position.split("\t")
        self.lstPos[index] = int(fields[1])
        self.lstNbRead[index] = int(fields[2])
        self.lstFraction[index] = float(fields[3])
        self.lstNormFraction[index] = float(fields[4])
        self.lstOtherInformation[index] = fields[5:]

    def _average(self, lst):
        """Arithmetic mean of *lst* as a float."""
        return float(sum(lst)) / float(len(lst))

    def smooth(self, size):
        """Fill the smoothed arrays with window averages of reads/fractions.

        Note: the *size* argument is ignored and replaced by the array length,
        matching the historical behavior of this method.
        """
        size = len(self.lstPos)
        for i in range(size):
            win = self.windowsSmoothing
            if i < 5:
                # Left edge: truncate the window at the start of the arrays.
                readAvg = self._average(self.lstNbRead[:i + win])
                fracAvg = self._average(self.lstFraction[:i + win])
            elif i > size - 5:
                # Right edge: truncate the window at the end of the arrays.
                readAvg = self._average(self.lstNbRead[i - win:])
                fracAvg = self._average(self.lstFraction[i - win:])
            else:
                readAvg = self._average(self.lstNbRead[i - win:i + win])
                fracAvg = self._average(self.lstFraction[i - win:i + win])
            self.smoothedNbReads[i] = readAvg
            self.smoothedFraction[i] = fracAvg

    def IdentifyGoodRegion(self, nbReadMini, FreqThreshold):
        """Return [chr, start, end] triplets for stretches whose smoothed
        values stay at or above either threshold (regions are closed when a
        position falls below both thresholds)."""
        regions = []
        start = self.start
        end = self.start
        for i in range(len(self.smoothedNbReads)):
            if self.smoothedNbReads[i] < nbReadMini and self.smoothedFraction[i] < FreqThreshold:
                if start != end:
                    regions.append([self.chr, start, end])
                start = self.start + i
                end = self.start + i
            else:
                end = end + 1
        return regions
def Z_score(val, mean, std):
    """Return the z-score of ``val`` for a distribution with the given mean/std."""
    deviation = float(val) - float(mean)
    return deviation / float(std)
def loadStatistics(strconfigFile):
    """Load per-library insert-size statistics from a config file.

    Each non-comment line is whitespace-separated: <file> <mean> <std>.
    Lines starting with '#' and blank lines are ignored.

    Returns a dict mapping file name -> [mean, std] (both floats).
    """
    statByFile = {}
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(strconfigFile) as objFile:
        for line in objFile:
            if line.startswith("#"):
                continue
            tabLine = line.split()
            # skip blank lines (the original raised IndexError on them)
            if not tabLine:
                continue
            statByFile[tabLine[0]] = [float(tabLine[1]), float(tabLine[2])]
    return statByFile
def getString(dico, file, pos):
    """Join the tag list dico[file][pos] into a comma-separated string.

    Returns "-" when the tag list is empty.
    """
    #print pos
    tags = dico[file][pos]
    if not tags:
        return "-"
    return ",".join(str(tag) for tag in tags)
def getLineToPrint(dico,index,pos,chr):
    """Build one tab-separated output line for genomic position ``pos``.

    ``dico`` maps sample/bam name -> per-position tag lists (see
    countReadsMate); ``index`` is the offset of ``pos`` inside those lists.
    Columns: chr, pos, total OK reads, pooled fraction, per-sample-averaged
    fraction, then per-sample counts/fractions, then per-sample raw tags.

    NOTE(review): relies on dico.keys() iterating in the same order in all
    three loops -- true within one call, but the column order is only as
    stable as the dict construction order.
    """
    nbTotalOK=0
    nbTotal=0
    fractionOk=0.0
    correctoedFractionOk=0.0
    lstTotal=[]
    lstFraction=[]
    i=0
    for sample in dico.keys():
        lstTag=dico[sample][index]
        nbTagOK=0
        nbTagMQbad=0
        # tag 1 = mate at plausible distance, tag 4 = low mapping quality
        for tag in lstTag:
            if tag==1:
                nbTagOK=nbTagOK+1
            if tag==4:
                nbTagMQbad=nbTagMQbad+1
        lstTotal.append(nbTagOK)
        # low-MQ reads are excluded from the per-sample denominator
        sizeSample=len(lstTag)-nbTagMQbad
        print sizeSample,len(lstTag)
        if sizeSample==0:
            fraction=0
        else:
            fraction=float(nbTagOK)/float(sizeSample)
        lstFraction.append(fraction)
        nbTotal=nbTotal+sizeSample
        nbTotalOK=nbTotalOK+nbTagOK
    # average of the per-sample fractions (each sample weighted equally)
    for fr in lstFraction:
        correctoedFractionOk=correctoedFractionOk+fr
    correctoedFractionOk=correctoedFractionOk/float(len(lstFraction))
    # pooled fraction over all samples (each read weighted equally)
    fractionOk=0.0
    if nbTotal!=0:
        fractionOk=float(nbTotalOK)/float(nbTotal)
    string=chr+"\t"+str(pos)+"\t"+str(nbTotalOK)+"\t"+str(fractionOk)+"\t"+str(correctoedFractionOk)
    i=0
    for sample in dico.keys():
        string=string+"\t"+str(lstTotal[i])+"\t"+str(lstFraction[i])
        i=i+1
    i=0
    for sample in dico.keys():
        string=string+"\t"+getString(dico,sample,index)
        i=i+1
    return string
def calculateFrequency(objreadcount, chr,start,end,outFile):
    """Write per-position stat lines for [start, end] and return the segment.

    Appends one line per position to ``outFile`` (format produced by
    getLineToPrint) while filling a GenomeSegment with the same parsed
    values for later smoothing/region detection.
    """
    objFile=open(outFile,"a")
    length=end-start+1
    obgGenomeSegment=GenomeSegment(length,chr,start)
    i=0
    while i < length:
        #print i, length
        pos=start+i
        string=getLineToPrint(objreadcount,i, pos, chr)
        # the same serialized line feeds both the file and the segment
        obgGenomeSegment.addPosition(string, i)
        objFile.write(string+"\n")
        #print string
        i=i+1
    objFile.close()
    return obgGenomeSegment
##################################################################
#
#
#
#
#
#################################################################
def countReadsMate(lstFile,dicoStats,chr,start,end,threshold_pval,MQ):
    """Tag every read covering [start, end] in each BAM file.

    For each file in ``lstFile`` builds a list (one entry per position) of
    per-read tag lists:
      0 = mate unmapped, 1 = mate at a plausible insert distance,
      2 = insert size rejected at ``threshold_pval``, 3 = mate on another
      chromosome, 4 = mapping quality below ``MQ``.
    ``dicoStats`` maps file -> [mean, std] of the library insert size.
    Returns {file: list of per-position tag lists}.
    """
    dicoPos={}
    for file in lstFile:
        samfile = pysam.AlignmentFile(file, "rb")
        # NOTE(review): [[]]*(n) aliases one list object, but every slot is
        # re-assigned below, so the aliasing is harmless here.
        lstPos=[[]]*(end-start+1)
        for pileupcolumn in samfile.pileup(chr,start,end):
            position=pileupcolumn.reference_pos
            lst=[]
            # pileup() may return columns outside the requested window
            if position < start:
                continue
            if position > end:
                break
            posTab=position-start
            for pReads in pileupcolumn.pileups:
                # NOTE(review): no 'continue' after the MQ test, so a
                # low-quality read gets tag 4 AND one of the mate tags
                # below -- confirm this double tagging is intended.
                if pReads.alignment.mapping_quality < MQ:
                    lst.append(4)
                if pReads.alignment.mate_is_unmapped:
                    lst.append(0)
                    #lstPos[posTab].append(0)
                elif samfile.getrname(pReads.alignment.next_reference_id) != chr:
                    lst.append(3)
                else:
                    # z-test of the observed insert size against the
                    # library's insert-size distribution
                    rend=pReads.alignment.reference_end
                    startMate=pReads.alignment.next_reference_start
                    delta=abs(startMate-rend)
                    mean=dicoStats[file][0]
                    std=dicoStats[file][1]
                    z=Z_score(delta,mean,std)
                    p_value = scipy.stats.norm.sf([abs(z)])[0]
                    #print pReads.alignment.next_reference_id
                    #print mean, std, delta, p_value
                    if p_value < threshold_pval:
                        lst.append(2)
                    else:
                        lst.append(1)
            lstPos[posTab]=lst
        dicoPos[file]=lstPos
    return dicoPos
def saveLstRegion(lstRegion, fileOut):
    """Append regions to a BED-like file, one 'chr<TAB>start<TAB>end' per line.

    ``lstRegion`` is a list of [chr, start, end] entries. The file is opened
    in append mode so successive bins accumulate in the same output file.
    The 'with' block guarantees the handle is closed even if a write fails
    (the original leaked it on error).
    """
    with open(fileOut, "a") as objFile:
        for region in lstRegion:
            objFile.write("%s\t%s\t%s\n" % (region[0], region[1], region[2]))
def main(param):
    """Scan [param.start, param.end] on param.chr in bins of param.bin
    positions, writing per-position read stats to <out>.rdc and the
    detected consistent regions to <out>.bed.
    """
    dicoStats=loadStatistics(param.strConfigFile)
    ##InitFileTo analyse
    outfile=param.outFile
    outReadCount=outfile+".rdc"
    outGoodRegion=outfile+".bed"
    # truncate both output files; later writes append bin by bin
    objFile=open(outReadCount,"w")
    objFile.close()
    objFile=open(outGoodRegion,"w")
    objFile.close()
    lstBams=param.lstBamFiles.split(",")
    CurrStart=param.start
    CurrEnd=param.start+param.bin-1
    #print end-start
    if param.end-param.start < param.bin:
        CurrEnd=param.end
    while CurrEnd <=param.end:
        ##count reads pair
        print "counting paired reads"
        hashReadCount=countReadsMate(lstBams,dicoStats,param.chr,CurrStart,CurrEnd,param.pvalMate,param.MQthreshold)
        ## calculate some stat and create an object that represnt genome segment (save the data in file
        print " calculate frequencies"
        objGenomSegment=calculateFrequency(hashReadCount,param.chr,CurrStart,CurrEnd,outReadCount)
        ## get the regioni
        print "smoothing count"
        # NOTE(review): GenomeSegment.smooth() overwrites its argument, so
        # param.smoothingWindows currently has no effect here.
        objGenomSegment.smooth(param.smoothingWindows)
        print "identify regions"
        lstRegion=objGenomSegment.IdentifyGoodRegion(param.minReads, param.minFreq)
        ## save the regions
        saveLstRegion(lstRegion,outGoodRegion)
        # advance to the next bin, clamping the last bin to param.end
        CurrStart=CurrEnd+1
        CurrEnd=CurrStart+param.bin-1
        if CurrEnd > param.end:
            CurrEnd=param.end
        if CurrEnd<=CurrStart:
            break
####################################################################################
# Command-line interface: declare all options, parse them and launch the analysis.
parser = argparse.ArgumentParser()
parser.add_argument('--bam_files', action='store', dest='lstBamFiles', default ="", help='liste of bam file to analyse format : bam1,bam2,...,bamN',required=True)
parser.add_argument('--config', action='store', dest='strConfigFile', help='configuration file describing the mean and std of the insert per library', required=True)
parser.add_argument('--out', action='store', dest='outFile', help='output file prefix where the data will be stored ', required=True)
parser.add_argument('--chr', action='store', dest='chr', help='chromosome to analyse',required=True)
parser.add_argument('--start', action='store', dest='start', help='start of the region to analyse',required=True, type=int)
parser.add_argument('--end', action='store', dest='end', help='end of the region to analyse\n',required=True,type=int)
parser.add_argument('--pval_mate', action='store', dest='pvalMate', help='pval threshold that two mates are in a good distance [0.0001]', default=0.0001, type=float)
parser.add_argument('--min_reads', action='store', dest='minReads', help='minimum number of reads that satisfy the pair-ends constraints required to have a "good" region [8]', default=8, type=int)
parser.add_argument('--min_freq', action='store', dest='minFreq', help='frequency threshold of reads satisfying the pair-end constraints to have a good regions [0.2]', default=0.2, type=float)
parser.add_argument('--MQ', action='store', dest='MQthreshold', help='reads with a mapping quality < MQ won\'t be considered [25]', default=25, type =int)
parser.add_argument('--smoothing_size', action='store', dest='smoothingWindows', help='size of the windows used to smooth the dataseti [5]', default=5, type=int)
parser.add_argument('--bin', action='store', dest='bin', help='number of position evaluated before storing in file (this is for performances issues) [30000]', default=30000, type=int)
param = parser.parse_args()
main(param)
|
tk2/assembly-eval
|
consistent/GetconsistantRegions.py
|
Python
|
mit
| 8,960
|
[
"pysam"
] |
c1496f2bb12dc139177a6a139b7a4ac97aefd14eed701c64cc0234923377ef28
|
# -*- coding: utf-8 -*-
"""Tests for the query parser"""
from unittest import TestCase
from nose.tools import eq_
from dxr.plugins import plugins_named
from dxr.query import query_grammar, QueryVisitor
class VisitorTests(TestCase):
    """Make sure ``QueryVisitor`` is putting together sane data structures."""

    def visit(self, query):
        # Parse with the core+clang grammar, then flatten the parse tree
        # into a list of filter dicts via QueryVisitor.
        return QueryVisitor().visit(query_grammar(plugins_named(['core', 'clang'])).parse(query))

    def test_overall(self):
        """Test the overall structure."""
        eq_(self.visit('regexp:(?i)snork'),
            [{'arg': '(?i)snork',
              'name': 'regexp',
              'not': False,
              'case_sensitive': False,
              'qualified': False}])

    def test_tricksy_orphanses(self):
        """Try to trick the parser into prematurely committing to various
        classifications."""
        eq_(self.visit('- -+ @ +- -+fred +type: +-type:hey type: smoo hi:mom +boo'),
            [{'arg': '-',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': '+',
              'name': 'text',
              'not': True,
              'case_sensitive': False,
              'qualified': False},
             {'arg': '@',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': '+-',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': '+fred',
              'name': 'text',
              'not': True,
              'case_sensitive': False,
              'qualified': False},
             {'arg': '+type:',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': '+-type:hey',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'type:',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'smoo',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'hi:mom',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': '+boo',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False}])

    def test_normal_things(self):
        """Make sure normal, everyday things that should work do."""
        # Covers regexp filters, negation, plain text, case sensitivity
        # (capitalized or @-prefixed terms), and a named filter.
        eq_(self.visit('regexp:smoo -regexp:foo|bar -baz qux foo Foo @foo type:yeah'),
            [{'arg': 'smoo',
              'name': 'regexp',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'foo|bar',
              'name': 'regexp',
              'not': True,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'baz',
              'name': 'text',
              'not': True,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'qux',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'foo',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'Foo',
              'name': 'text',
              'not': False,
              'case_sensitive': True,
              'qualified': False},
             {'arg': 'foo',
              'name': 'text',
              'not': False,
              'case_sensitive': True,
              'qualified': False},
             {'arg': 'yeah',
              'name': 'type',
              'not': False,
              'case_sensitive': False,
              'qualified': False}])

    def test_qualified(self):
        """Make sure fully-qualified filters are recognized."""
        eq_(self.visit('+type:Snork'),
            [{'arg': 'Snork',
              'name': 'type',
              'not': False,
              'case_sensitive': True,
              'qualified': True}])

    def test_unclosed_quotes(self):
        """An unclosed quoted string should be considered as if it were closed.
        This makes it more likely we perform the same sorts of searches while
        you're still typing as we will once you get to the end, yielding more
        useful incremental results.
        """
        eq_(self.visit('"this here thing'),
            [{'arg': 'this here thing',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False}])

    def test_literal_quotes(self):
        """Make sure we can express literal quotes when we want to.
        Also accidentally test ignoring of leading and trailing spaces.
        """
        eq_(self.visit(""" '"this' 'here"' "'thing'" """),
            [{'arg': '"this',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': 'here"',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False},
             {'arg': "'thing'",
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False}])

    def test_bare_unicode(self):
        """Make sure non-ASCII chars are recognized in bare text."""
        eq_(self.visit(u'börg'),
            [{'arg': u'börg',
              'name': 'text',
              'not': False,
              'case_sensitive': False,
              'qualified': False}])

    def test_empty(self):
        """An empty query shouldn't give a ParseError."""
        eq_(self.visit(''), [])
# Not in VisitorTests because nose doesn't support test generators in TestCase
# subclasses.
def test_quotes():
    """Test the quoted-string regexes, both with double and single quotes."""
    # (input, expected) pairs written for the double-quote rule; the
    # single-quote variant is derived by flipping the quote characters.
    tests = [(r'"hi there"', r'hi there'),
             (r'"hi"there"', r'hi"there'),
             (r'"hi"there"d', r'hi"there"d'),  # Don't prematurely stop capturing when we hit a quote without a space after it.
             (r'"hi\" and"', r'hi" and'),  # Don't count a backslashed quote as a closing one, even if it has a space after it.
             (r'"hi \pthere\"boogy"', r'hi \pthere"boogy'),  # Preserve backslashes that don't escape a quote.
             (r'"multi word', r'multi word'),  # Get all words in a space-having input without closing quotes.
             (r'"\\""', r'\"'),  # It is possible to express backslash-quote.
             (ur'"sñork"', ur'sñork')]  # Unicode holds up in quoted strings.
    for rule_name, transform in [('double_quoted_text',
                                  lambda x: x),
                                 ('single_quoted_text',
                                  lambda x: x.replace('"', "'"))]:
        rule = query_grammar([])[rule_name]
        for input, output in tests:
            # NOTE(review): the yielded closure late-binds rule/input/output.
            # nose runs each yielded test as it is generated, so this works,
            # but the bindings would be wrong if tests were collected first.
            def test_something():
                eq_(QueryVisitor().visit(rule.match(transform(input))),
                    transform(output))
            yield test_something
|
jbradberry/dxr
|
tests/test_query_parser.py
|
Python
|
mit
| 7,592
|
[
"VisIt"
] |
b4fe640df0369f98df6375d94dca31421dc744e3606169382e63b4098fe535ef
|
""" SiteInspectorAgent
This agent inspect Sites, and evaluates policies that apply.
The following options can be set for the SiteInspectorAgent.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN SiteInspectorAgent
:end-before: ##END
:dedent: 2
:caption: SiteInspectorAgent options
"""
import datetime
import concurrent.futures
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.ResourceStatusSystem.PolicySystem.PEP import PEP
AGENT_NAME = "ResourceStatus/SiteInspectorAgent"
class SiteInspectorAgent(AgentModule):
    """SiteInspectorAgent

    The SiteInspectorAgent agent is an agent that is used to get the all the site names
    and trigger PEP to evaluate their status.
    """

    # Max number of worker threads by default
    __maxNumberOfThreads = 15

    # Inspection freqs, defaults, the lower, the higher priority to be checked.
    # Error state usually means there is a glitch somewhere, so it has the highest
    # priority.
    __checkingFreqs = {"Active": 20, "Degraded": 20, "Probing": 20, "Banned": 15, "Unknown": 10, "Error": 5}

    def __init__(self, *args, **kwargs):
        AgentModule.__init__(self, *args, **kwargs)
        # Set in initialize(): ResourceStatusClient instance
        self.rsClient = None
        # Clients handed to the PEP in _execute()
        self.clients = {}

    def initialize(self):
        """Standard initialize."""
        # Both client classes are loaded dynamically so extensions can
        # override them through the ObjectLoader.
        res = ObjectLoader().loadObject("DIRAC.ResourceStatusSystem.Client.ResourceManagementClient")
        if not res["OK"]:
            self.log.error("Failed to load ResourceManagementClient class: %s" % res["Message"])
            return res
        rmClass = res["Value"]
        res = ObjectLoader().loadObject("DIRAC.ResourceStatusSystem.Client.ResourceStatusClient")
        if not res["OK"]:
            self.log.error("Failed to load ResourceStatusClient class: %s" % res["Message"])
            return res
        rsClass = res["Value"]
        self.rsClient = rsClass()
        self.clients["ResourceStatusClient"] = rsClass()
        self.clients["ResourceManagementClient"] = rmClass()
        maxNumberOfThreads = self.am_getOption("maxNumberOfThreads", 15)
        self.log.info("Multithreaded with %d threads" % maxNumberOfThreads)
        self.threadPoolExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=maxNumberOfThreads)
        return S_OK()

    def execute(self):
        """
        It gets the sites from the Database which are eligible to be re-checked.
        """
        utcnow = datetime.datetime.utcnow().replace(microsecond=0)
        future_to_element = {}
        # get the current status
        res = self.rsClient.selectStatusElement("Site", "Status")
        if not res["OK"]:
            return res
        # filter elements
        for site in res["Value"]:
            # Maybe an overkill, but this way I have NEVER again to worry about order
            # of elements returned by mySQL on tuples
            siteDict = dict(zip(res["Columns"], site))
            # This if-clause skips all the elements that should not be checked yet
            timeToNextCheck = self.__checkingFreqs[siteDict["Status"]]
            if utcnow <= siteDict["LastCheckTime"] + datetime.timedelta(minutes=timeToNextCheck):
                continue
            # We skip the elements with token different than "rs_svc"
            if siteDict["TokenOwner"] != "rs_svc":
                self.log.verbose("Skipping %s with token %s" % (siteDict["Name"], siteDict["TokenOwner"]))
                continue
            # if we are here, we process the current element
            self.log.verbose('"%s" # %s # %s' % (siteDict["Name"], siteDict["Status"], siteDict["LastCheckTime"]))
            # Lower-case the first letter of every column name: _execute()
            # reads e.g. site["name"] / site["status"] rather than "Name"
            lowerElementDict = {"element": "Site"}
            for key, value in siteDict.items():
                if len(key) >= 2:  # VO !
                    lowerElementDict[key[0].lower() + key[1:]] = value
            # We process lowerElementDict
            future = self.threadPoolExecutor.submit(self._execute, lowerElementDict)
            future_to_element[future] = siteDict["Name"]
        # NOTE(review): 'transID' is a copy-paste name; it actually holds
        # the site name keyed by the future.
        for future in concurrent.futures.as_completed(future_to_element):
            transID = future_to_element[future]
            try:
                future.result()
            except Exception as exc:
                self.log.exception("%s generated an exception: %s" % (transID, exc))
            else:
                self.log.info("Processed", transID)
        return S_OK()

    def _execute(self, site):
        """
        Method run by each of the thread that is in the ThreadPool.
        It evaluates the policies for such site and enforces the necessary actions.
        """
        pep = PEP(clients=self.clients)
        self.log.verbose(
            "%s ( VO=%s / status=%s / statusType=%s ) being processed"
            % (site["name"], site["vO"], site["status"], site["statusType"])
        )
        try:
            res = pep.enforce(site)
        except Exception:
            self.log.exception("Exception during enforcement")
            res = S_ERROR("Exception during enforcement")
        if not res["OK"]:
            self.log.error("Failed policy enforcement", res["Message"])
            return res
        resEnforce = res["Value"]
        oldStatus = resEnforce["decisionParams"]["status"]
        statusType = resEnforce["decisionParams"]["statusType"]
        newStatus = resEnforce["policyCombinedResult"]["Status"]
        reason = resEnforce["policyCombinedResult"]["Reason"]
        # Log only actual status transitions reported by the enforcement
        if oldStatus != newStatus:
            self.log.info(
                "%s (%s) is now %s ( %s ), before %s" % (site["name"], statusType, newStatus, reason, oldStatus)
            )

    def finalize(self):
        """graceful finalization"""
        self.log.info("Wait for threads to get empty before terminating the agent")
        self.threadPoolExecutor.shutdown()
        self.log.info("Threads are empty, terminating the agent...")
        return S_OK()
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/Agent/SiteInspectorAgent.py
|
Python
|
gpl-3.0
| 6,013
|
[
"DIRAC"
] |
9d9fe3ad83e0cc2337187146c0a62dbfe26fe2902708987fb3c73489d41c500a
|
#!/usr/bin/env python
########################################################################
# File : dirac-proxy-init.py
# Author : Adrian Casajus
########################################################################
"""
Creating a proxy.
Example:
$ dirac-proxy-init -g dirac_user -t --rfc
Enter Certificate password:
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import glob
import time
import datetime
import DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.FrameworkSystem.Client import ProxyGeneration, ProxyUpload
from DIRAC.Core.Security import X509Chain, ProxyInfo, VOMS
from DIRAC.Core.Security.Locations import getCAsLocation
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient
__RCSID__ = "$Id$"
class Params(ProxyGeneration.CLIParams):
    """CLI parameters for dirac-proxy-init, extending the generation ones."""

    # whether to add a VOMS extension to the generated proxy
    addVOMSExt = False
    # whether to upload a long-lived proxy to the ProxyManager
    uploadProxy = True
    uploadPilot = False

    def setVOMSExt(self, _arg):
        """Switch callback: request a VOMS extension."""
        self.addVOMSExt = True
        return S_OK()

    def disableProxyUpload(self, _arg):
        """Switch callback: skip the ProxyManager upload."""
        self.uploadProxy = False
        return S_OK()

    def registerCLISwitches(self):
        """Register the base proxy-generation switches plus the init-specific ones."""
        ProxyGeneration.CLIParams.registerCLISwitches(self)
        Script.registerSwitch(
            "N", "no-upload", "Do not upload a long lived proxy to the ProxyManager", self.disableProxyUpload
        )
        Script.registerSwitch("M", "VOMS", "Add voms extension", self.setVOMSExt)
class ProxyInit(object):
    """Drives proxy creation: generation, CA/CRL freshness check, VOMS
    extension, upload to the ProxyManager and final reporting."""

    def __init__(self, piParams):
        self.__piParams = piParams
        self.__issuerCert = False
        self.__proxyGenerated = False
        self.__uploadedInfo = {}

    def getIssuerCert(self):
        """Return (and cache) the issuer certificate of the local proxy chain.

        Exits the process on failure, mirroring the CLI behaviour.
        """
        if self.__issuerCert:
            return self.__issuerCert
        proxyChain = X509Chain.X509Chain()
        resultProxyChainFromFile = proxyChain.loadChainFromFile(self.__piParams.certLoc)
        if not resultProxyChainFromFile["OK"]:
            gLogger.error("Could not load the proxy: %s" % resultProxyChainFromFile["Message"])
            sys.exit(1)
        resultIssuerCert = proxyChain.getIssuerCert()
        if not resultIssuerCert["OK"]:
            gLogger.error("Could not load the proxy: %s" % resultIssuerCert["Message"])
            sys.exit(1)
        self.__issuerCert = resultIssuerCert["Value"]
        return self.__issuerCert

    def certLifeTimeCheck(self):
        """Print a warning banner when the certificate is close to expiry."""
        minLife = Registry.getGroupOption(self.__piParams.diracGroup, "SafeCertificateLifeTime", 2592000)
        resultIssuerCert = self.getIssuerCert()
        resultRemainingSecs = resultIssuerCert.getRemainingSecs()  # pylint: disable=no-member
        if not resultRemainingSecs["OK"]:
            gLogger.error("Could not retrieve certificate expiration time", resultRemainingSecs["Message"])
            return
        lifeLeft = resultRemainingSecs["Value"]
        if minLife > lifeLeft:
            daysLeft = int(lifeLeft / 86400)
            msg = "Your certificate will expire in less than %d days. Please renew it!" % daysLeft
            sep = "=" * (len(msg) + 4)
            msg = "%s\n %s \n%s" % (sep, msg, sep)
            gLogger.notice(msg)

    def addVOMSExtIfNeeded(self):
        """Add a VOMS extension when requested on the CLI or auto-enabled for the group."""
        addVOMS = self.__piParams.addVOMSExt or Registry.getGroupOption(
            self.__piParams.diracGroup, "AutoAddVOMS", False
        )
        if not addVOMS:
            return S_OK()
        vomsAttr = Registry.getVOMSAttributeForGroup(self.__piParams.diracGroup)
        if not vomsAttr:
            return S_ERROR(
                "Requested adding a VOMS extension but no VOMS attribute defined for group %s"
                % self.__piParams.diracGroup
            )
        resultVomsAttributes = VOMS.VOMS().setVOMSAttributes(
            self.__proxyGenerated, attribute=vomsAttr, vo=Registry.getVOMSVOForGroup(self.__piParams.diracGroup)
        )
        if not resultVomsAttributes["OK"]:
            return S_ERROR(
                "Could not add VOMS extensions to the proxy\nFailed adding VOMS attribute: %s"
                % resultVomsAttributes["Message"]
            )
        gLogger.notice("Added VOMS attribute %s" % vomsAttr)
        chain = resultVomsAttributes["Value"]
        result = chain.dumpAllToFile(self.__proxyGenerated)
        if not result["OK"]:
            return result
        return S_OK()

    def createProxy(self):
        """Creates the proxy on disk"""
        gLogger.notice("Generating proxy...")
        # Fixed: use the instance's parameters instead of the module-level
        # global 'piParams' (the global made the class unusable outside main()).
        resultProxyGenerated = ProxyGeneration.generateProxy(self.__piParams)
        if not resultProxyGenerated["OK"]:
            gLogger.error(resultProxyGenerated["Message"])
            sys.exit(1)
        self.__proxyGenerated = resultProxyGenerated["Value"]
        return resultProxyGenerated

    def uploadProxy(self):
        """Upload the proxy to the proxyManager service"""
        issuerCert = self.getIssuerCert()
        resultUserDN = issuerCert.getSubjectDN()  # pylint: disable=no-member
        if not resultUserDN["OK"]:
            return resultUserDN
        userDN = resultUserDN["Value"]
        gLogger.notice("Uploading proxy..")
        if userDN in self.__uploadedInfo:
            # NOTE(review): looks up the empty-string key -- presumably this
            # should be a group name; verify against ProxyUpload's result
            # layout before changing it.
            expiry = self.__uploadedInfo[userDN].get("")
            if expiry:
                # Skip the upload if the stored proxy already covers (almost)
                # the whole certificate lifetime.
                if (
                    issuerCert.getNotAfterDate()["Value"] - datetime.timedelta(minutes=10) < expiry
                ):  # pylint: disable=no-member
                    gLogger.info('Proxy with DN "%s" already uploaded' % userDN)
                    return S_OK()
        gLogger.info("Uploading %s proxy to ProxyManager..." % userDN)
        upParams = ProxyUpload.CLIParams()
        upParams.onTheFly = True
        upParams.proxyLifeTime = issuerCert.getRemainingSecs()["Value"] - 300  # pylint: disable=no-member
        upParams.rfcIfPossible = self.__piParams.rfc
        # Reuse the credentials the user already provided
        for k in ("certLoc", "keyLoc", "userPasswd"):
            setattr(upParams, k, getattr(self.__piParams, k))
        resultProxyUpload = ProxyUpload.uploadProxy(upParams)
        if not resultProxyUpload["OK"]:
            gLogger.error(resultProxyUpload["Message"])
            return resultProxyUpload
        self.__uploadedInfo = resultProxyUpload["Value"]
        gLogger.info("Proxy uploaded")
        return S_OK()

    def printInfo(self):
        """Printing utilities"""
        resultProxyInfoAsAString = ProxyInfo.getProxyInfoAsString(self.__proxyGenerated)
        if not resultProxyInfoAsAString["OK"]:
            gLogger.error("Failed to get the new proxy info: %s" % resultProxyInfoAsAString["Message"])
        else:
            gLogger.notice("Proxy generated:")
            gLogger.notice(resultProxyInfoAsAString["Value"])
        if self.__uploadedInfo:
            gLogger.notice("\nProxies uploaded:")
            # Compute column widths first so the table lines up
            maxDNLen = 0
            maxGroupLen = 0
            for userDN in self.__uploadedInfo:
                maxDNLen = max(maxDNLen, len(userDN))
                for group in self.__uploadedInfo[userDN]:
                    maxGroupLen = max(maxGroupLen, len(group))
            gLogger.notice(" %s | %s | Until (GMT)" % ("DN".ljust(maxDNLen), "Group".ljust(maxGroupLen)))
            for userDN in self.__uploadedInfo:
                for group in self.__uploadedInfo[userDN]:
                    gLogger.notice(
                        " %s | %s | %s"
                        % (
                            userDN.ljust(maxDNLen),
                            group.ljust(maxGroupLen),
                            self.__uploadedInfo[userDN][group].strftime("%Y/%m/%d %H:%M"),
                        )
                    )

    def checkCAs(self):
        """Warn about and try to refresh outdated CRLs/CAs (best effort)."""
        caDir = getCAsLocation()
        if not caDir:
            gLogger.warn("No valid CA dir found.")
            return
        # In globus standards .r0 files are CRLs. They have the same names of the CAs but diffent file extension
        searchExp = os.path.join(caDir, "*.r0")
        crlList = glob.glob(searchExp)
        if not crlList:
            gLogger.warn("No CRL files found for %s. Abort check of CAs" % searchExp)
            return
        newestFPath = max(crlList, key=os.path.getmtime)
        newestFTime = os.path.getmtime(newestFPath)
        if newestFTime > (time.time() - (2 * 24 * 3600)):
            # At least one of the files has been updated in the last 2 days
            return S_OK()
        if not os.access(caDir, os.W_OK):
            gLogger.error("Your CRLs appear to be outdated, but you have no access to update them.")
            # Try to continue anyway...
            return S_OK()
        # Update the CAs & CRLs
        gLogger.notice("Your CRLs appear to be outdated; attempting to update them...")
        bdc = BundleDeliveryClient()
        res = bdc.syncCAs()
        if not res["OK"]:
            gLogger.error("Failed to update CAs", res["Message"])
        res = bdc.syncCRLs()
        if not res["OK"]:
            gLogger.error("Failed to update CRLs", res["Message"])
        # Continue even if the update failed...
        return S_OK()

    def doTheMagic(self):
        """Full workflow: create the proxy, check CAs, warn on expiry, add a
        VOMS extension and optionally upload to the ProxyManager."""
        proxy = self.createProxy()
        if not proxy["OK"]:
            return proxy
        self.checkCAs()
        # Fixed: call methods on self instead of the module-level global 'pI',
        # so the workflow also works when the class is used programmatically.
        self.certLifeTimeCheck()
        resultProxyWithVOMS = self.addVOMSExtIfNeeded()
        if not resultProxyWithVOMS["OK"]:
            if "returning a valid AC for the user" in resultProxyWithVOMS["Message"]:
                gLogger.error(resultProxyWithVOMS["Message"])
                gLogger.error("\n Are you sure you are properly registered in the VO?")
            elif "Missing voms-proxy" in resultProxyWithVOMS["Message"]:
                gLogger.notice("Failed to add VOMS extension: no standard grid interface available")
            else:
                gLogger.error(resultProxyWithVOMS["Message"])
            if self.__piParams.strict:
                return resultProxyWithVOMS
        if self.__piParams.uploadProxy:
            resultProxyUpload = self.uploadProxy()
            if not resultProxyUpload["OK"]:
                if self.__piParams.strict:
                    return resultProxyUpload
        return S_OK()
@Script()
def main():
    """Entry point: parse the CLI, create/upload the proxy and report."""
    # Globals are read by ProxyInit.doTheMagic()/createProxy() in the
    # original class implementation.
    global piParams, pI
    piParams = Params()
    piParams.registerCLISwitches()
    # Take off tokens
    os.environ["DIRAC_USE_ACCESS_TOKEN"] = "False"
    Script.disableCS()
    Script.parseCommandLine(ignoreErrors=True)
    # Proxy creation is always done with the user certificate
    DIRAC.gConfig.setOptionValue("/DIRAC/Security/UseServerCertificate", "False")
    pI = ProxyInit(piParams)
    resultDoTheMagic = pI.doTheMagic()
    if not resultDoTheMagic["OK"]:
        gLogger.fatal(resultDoTheMagic["Message"])
        sys.exit(1)
    pI.printInfo()
    sys.exit(0)


if __name__ == "__main__":
    main()
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/scripts/dirac_proxy_init.py
|
Python
|
gpl-3.0
| 10,817
|
[
"DIRAC"
] |
fa889b6cd1e4714e439a937d409a33b880ce99f2a9231524af52c78861789efe
|
import numpy as np
def affine_forward(x, w, b):
    """Forward pass for an affine (fully-connected) layer.

    Each example of x, of shape (N, d_1, ..., d_k), is flattened to a
    D-vector (D = d_1 * ... * d_k) and mapped to an M-vector.

    Inputs:
    - x: input data, shape (N, d_1, ..., d_k)
    - w: weights, shape (D, M)
    - b: biases, shape (M,)

    Returns a tuple of:
    - out: result of x_flat.dot(w) + b, shape (N, M)
    - cache: (x, w, b) saved for the backward pass
    """
    flattened = x.reshape(x.shape[0], -1)
    return flattened.dot(w) + b, (x, w, b)
def affine_backward(dout, cache):
    """Backward pass for an affine layer.

    Inputs:
    - dout: upstream derivative, shape (N, M)
    - cache: (x, w, b) tuple saved by affine_forward

    Returns a tuple of:
    - dx: gradient w.r.t. x, same shape as x
    - dw: gradient w.r.t. w, shape (D, M)
    - db: gradient w.r.t. b, shape (M,)
    """
    x, w, b = cache
    flattened = x.reshape(x.shape[0], -1)
    dx = dout.dot(w.T).reshape(x.shape)
    dw = flattened.T.dot(dout)
    db = dout.sum(axis=0)
    return dx, dw, db
def relu_forward(x):
    """Elementwise ReLU forward pass.

    Input:
    - x: array of any shape

    Returns a tuple of:
    - out: max(x, 0), same shape as x
    - cache: the input x, saved for the backward pass
    """
    return np.maximum(x, 0), x
def relu_backward(dout, cache):
    """Elementwise ReLU backward pass.

    Input:
    - dout: upstream derivatives, any shape
    - cache: the forward-pass input x, same shape as dout

    Returns:
    - dx: dout where x was strictly positive, zero elsewhere
    """
    return np.where(cache > 0, dout, 0)
def batchnorm_forward(x, gamma, beta, bn_param):
    """Batch normalization forward pass.

    In 'train' mode, normalizes x with the minibatch mean/variance and
    blends those statistics into exponentially-decaying running averages
    stored back into bn_param. In 'test' mode, normalizes with the stored
    running statistics instead.

    Input:
    - x: data of shape (N, D)
    - gamma: scale parameter, shape (D,)
    - beta: shift parameter, shape (D,)
    - bn_param: dict with keys 'mode' ('train' or 'test'; required) and
      optional 'eps', 'momentum', 'running_mean', 'running_var'

    Returns a tuple of:
    - out: normalized, scaled and shifted data, shape (N, D)
    - cache: intermediates for the backward pass (empty tuple in test mode)
    """
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)
    _, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
    cache = ()
    if mode == 'train':
        batch_mean = x.mean(axis=0)
        batch_var = x.var(axis=0)
        batch_std = np.sqrt(batch_var + eps)
        x_hat = (x - batch_mean) / batch_std
        out = gamma * x_hat + beta
        # blend the minibatch statistics into the running averages
        bn_param['running_mean'] = momentum * running_mean + (1 - momentum) * batch_mean
        bn_param['running_var'] = momentum * running_var + (1 - momentum) * batch_var
        cache = (x, batch_mean, batch_var, eps, batch_std, gamma, x_hat)
    elif mode == 'test':
        x_hat = (x - running_mean) / np.sqrt(running_var + eps)
        out = gamma * x_hat + beta
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
    return out, cache
def batchnorm_backward(dout, cache):
    """
    Backward pass for batch normalization.

    This simply delegates to the simplified closed-form implementation in
    batchnorm_backward_alt, which consumes the identical cache tuple.

    Inputs:
    - dout: Upstream derivatives, of shape (N, D)
    - cache: Tuple of intermediates from batchnorm_forward.

    Returns a tuple of:
    - dx: Gradient with respect to inputs x, of shape (N, D)
    - dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
    - dbeta: Gradient with respect to shift parameter beta, of shape (D,)
    """
    grads = batchnorm_backward_alt(dout, cache)
    return grads
def batchnorm_backward_alt(dout, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalizaton backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
x, mean, var, eps, std, gamma, x_norm = cache
N = x.shape[0]
dbeta = np.sum(dout, axis=0)
dgamma = np.sum(dout * x_norm, axis=0)
dx_norm = dout * gamma
dmean = np.sum(-dx_norm / std, axis=0)
dvar = np.sum(-0.5 * dx_norm * (x - mean) / ((var + eps) * std), axis=0)
dx = dx_norm / std + dmean / N + 2 * (dvar / N) * (x - mean)
return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
    """
    Performs the forward pass for (inverted) dropout.

    Inputs:
    - x: Input data, of any shape
    - dropout_param: A dictionary with the following keys:
      - p: Dropout parameter.  NOTE(review): despite the original docstring
        saying units are *dropped* with probability p, the mask keeps entries
        where rand < p, i.e. p acts as the probability of *keeping* a unit --
        confirm against callers before changing.
      - mode: 'test' or 'train'.  If the mode is train, then perform dropout;
        if the mode is test, then just return the input.
      - seed: Seed for the random number generator.  Passing seed makes this
        function deterministic, which is needed for gradient checking but not
        in real networks.

    Outputs:
    - out: Array of the same shape as x.
    - cache: A tuple (dropout_param, mask) in training mode; None in test mode.
    """
    keep_prob, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])
    if mode == 'train':
        # Inverted dropout: scale at train time so test time is a no-op.
        mask = np.random.random(x.shape) < keep_prob
        out = x * mask / keep_prob
        return out, (dropout_param, mask)
    if mode == 'test':
        return x, None
    raise ValueError('Invalid forward dropout mode "%s"' % mode)
def dropout_backward(dout, cache):
    """
    Perform the backward pass for (inverted) dropout.

    Inputs:
    - dout: Upstream derivatives, of any shape
    - cache: (dropout_param, mask) from dropout_forward.

    Returns:
    - dx: Gradient with respect to x, same shape as dout.

    Raises:
    - ValueError: if dropout_param['mode'] is neither 'train' nor 'test'
      (previously this fell through to an UnboundLocalError on dx).
    """
    dropout_param, mask = cache
    p, mode = dropout_param['p'], dropout_param['mode']
    if mode == 'train':
        # Mirror the forward scaling: only kept units pass gradient, rescaled.
        dx = dout * mask / p
    elif mode == 'test':
        dx = dout
    else:
        raise ValueError('Invalid backward dropout mode "%s"' % mode)
    return dx
def conv_forward_naive(x, w, b, conv_param):
    """
    A naive implementation of the forward pass for a convolutional layer.

    The input consists of N data points, each with C channels, height H and
    width W.  We convolve each input with F different filters, where each
    filter spans all C channels and has height HH and width WW.

    Input:
    - x: Input data of shape (N, C, H, W)
    - w: Filter weights of shape (F, C, HH, WW)
    - b: Biases, of shape (F,)
    - conv_param: A dictionary with the following keys:
      - 'stride': The number of pixels between adjacent receptive fields in the
        horizontal and vertical directions.
      - 'pad': The number of pixels that will be used to zero-pad the input.

    Returns a tuple of:
    - out: Output data, of shape (N, F, H', W') where H' and W' are given by
      H' = 1 + (H + 2 * pad - HH) // stride
      W' = 1 + (W + 2 * pad - WW) // stride
    - cache: (x, pad_x, w, conv_param) -- the zero-padded input is cached
      instead of b; the backward pass recovers db from dout alone.
    """
    pad, stride = conv_param['pad'], conv_param['stride']
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    # Use integer arithmetic for the output size: the previous float `/`
    # produced float dimensions, which np.zeros and array indexing reject
    # under Python 3.
    assert (H + 2 * pad - HH) % stride == 0 and \
           (W + 2 * pad - WW) % stride == 0, 'Invalid dimensions for stride and padding'
    _H = 1 + (H + 2 * pad - HH) // stride
    _W = 1 + (W + 2 * pad - WW) // stride
    pad_x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant')
    out = np.zeros((N, F, _H, _W))
    for i in range(N):
        for j in range(F):
            for k in range(_H):
                p = k * stride  # top edge of the receptive field
                for l in range(_W):
                    q = l * stride  # left edge of the receptive field
                    out[i, j, k, l] = np.sum(pad_x[i, :, p : p + HH, q : q + WW] * w[j]) + b[j]
    cache = (x, pad_x, w, conv_param)
    return out, cache
def conv_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a convolutional layer.

    Inputs:
    - dout: Upstream derivatives, of shape (N, F, H', W')
    - cache: (x, pad_x, w, conv_param) as produced by conv_forward_naive

    Returns a tuple of:
    - dx: Gradient with respect to x
    - dw: Gradient with respect to w
    - db: Gradient with respect to b
    """
    x, padded, w, conv_param = cache
    pad, stride = conv_param['pad'], conv_param['stride']
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    _, _, out_h, out_w = dout.shape
    # Bias gradient: sum upstream gradients over everything but the filter axis.
    db = np.sum(dout, axis=(0, 2, 3))
    dw = np.zeros_like(w)
    dpadded = np.zeros_like(padded)
    for n in range(N):
        for f in range(F):
            for row in range(out_h):
                top = row * stride
                for col in range(out_w):
                    left = col * stride
                    window = padded[n, :, top : top + HH, left : left + WW]
                    g = dout[n, f, row, col]
                    # Each output element contributes its window to dw and
                    # scatters the filter weights back into the padded input.
                    dw[f] += window * g
                    dpadded[n, :, top : top + HH, left : left + WW] += w[f] * g
    # Strip the zero padding to recover the gradient w.r.t. the original input.
    dx = dpadded[:, :, pad : H + pad, pad : W + pad]
    return dx, dw, db
def max_pool_forward_naive(x, pool_param):
    """
    A naive implementation of the forward pass for a max pooling layer.

    Inputs:
    - x: Input data, of shape (N, C, H, W)
    - pool_param: dictionary with the following keys:
      - 'pool_height': The height of each pooling region
      - 'pool_width': The width of each pooling region
      - 'stride': The distance between adjacent pooling regions

    Returns a tuple of:
    - out: Output data, of shape (N, C, H', W')
    - cache: (x, pool_param)
    """
    pool_height, pool_width, stride = pool_param['pool_height'], \
                                      pool_param['pool_width'], \
                                      pool_param['stride']
    N, C, H, W = x.shape
    # Use integer arithmetic for the output size: the previous float `/`
    # produced float dimensions, which np.zeros and array indexing reject
    # under Python 3.
    assert (H - pool_height) % stride == 0 and \
           (W - pool_width) % stride == 0, 'Invalid dimensions for stride and dimensions'
    _H = (H - pool_height) // stride + 1
    _W = (W - pool_width) // stride + 1
    out = np.zeros((N, C, _H, _W))
    for i in range(N):
        for j in range(C):
            for k in range(_H):
                p = k * stride  # top edge of the pooling window
                for l in range(_W):
                    q = l * stride  # left edge of the pooling window
                    out[i, j, k, l] = np.max(x[i, j, p : p + pool_height, q : q + pool_width])
    cache = (x, pool_param)
    return out, cache
def max_pool_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a max pooling layer.

    Inputs:
    - dout: Upstream derivatives
    - cache: A tuple of (x, pool_param) as in the forward pass.

    Returns:
    - dx: Gradient with respect to x
    """
    x, pool_param = cache
    pool_height, pool_width, stride = pool_param['pool_height'], \
                                      pool_param['pool_width'], \
                                      pool_param['stride']
    N, C, H, W = x.shape
    _, _, _H, _W = dout.shape
    dx = np.zeros_like(x)
    for i in range(N):
        for j in range(C):
            for k in range(_H):
                p = k * stride
                for l in range(_W):
                    q = l * stride
                    window = np.reshape(
                        x[i, j, p : p + pool_height, q : q + pool_width],
                        (pool_height * pool_width))
                    # Route the gradient to the arg-max element only (ties
                    # break to the first occurrence, as np.argmax does).
                    dwindow = np.zeros_like(window)
                    dwindow[np.argmax(window)] = 1
                    # Accumulate (+=) rather than assign: with stride smaller
                    # than the pool size the windows overlap, so one cell can
                    # be the max of several windows; plain assignment silently
                    # dropped all but the last contribution.
                    dx[i, j, p : p + pool_height, q : q + pool_width] += np.reshape(
                        dwindow,
                        (pool_height, pool_width)) * dout[i, j, k, l]
    return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
    """
    Computes the forward pass for spatial batch normalization.

    Inputs:
    - x: Input data of shape (N, C, H, W)
    - gamma: Scale parameter, of shape (C,)
    - beta: Shift parameter, of shape (C,)
    - bn_param: Same dictionary as for batchnorm_forward (mode, eps,
      momentum, running statistics).

    Returns a tuple of:
    - out: Output data, of shape (N, C, H, W)
    - cache: Values needed for the backward pass
    """
    N, C, H, W = x.shape
    # Move channels last and collapse every other axis so each channel
    # becomes one feature column, then reuse the vanilla (N, D) batchnorm.
    channels_last = np.transpose(x, (0, 2, 3, 1))
    flat = np.reshape(channels_last, (N * H * W, C))
    flat_out, cache = batchnorm_forward(flat, gamma, beta, bn_param)
    out = np.transpose(np.reshape(flat_out, (N, H, W, C)), (0, 3, 1, 2))
    return out, cache
def spatial_batchnorm_backward(dout, cache):
    """
    Computes the backward pass for spatial batch normalization.

    Inputs:
    - dout: Upstream derivatives, of shape (N, C, H, W)
    - cache: Values from the forward pass

    Returns a tuple of:
    - dx: Gradient with respect to inputs, of shape (N, C, H, W)
    - dgamma: Gradient with respect to scale parameter, of shape (C,)
    - dbeta: Gradient with respect to shift parameter, of shape (C,)
    """
    N, C, H, W = dout.shape
    # Mirror the forward reshape: channels last, everything else flattened.
    flat_dout = np.reshape(np.transpose(dout, (0, 2, 3, 1)), (N * H * W, C))
    flat_dx, dgamma, dbeta = batchnorm_backward(flat_dout, cache)
    dx = np.transpose(np.reshape(flat_dx, (N, H, W, C)), (0, 3, 1, 2))
    return dx, dgamma, dbeta
def svm_loss(x, y):
    """
    Computes the loss and gradient for multiclass SVM classification
    (hinge loss with margin 1).

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i]
      and 0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    num_train = x.shape[0]
    rows = np.arange(num_train)
    correct = x[rows, y][:, np.newaxis]
    margins = np.maximum(0, x - correct + 1.0)
    # The correct class never contributes to the hinge loss.
    margins[rows, y] = 0
    loss = np.sum(margins) / num_train
    # Each violated margin pushes its class up by 1 and the correct class
    # down by 1.
    dx = (margins > 0).astype(x.dtype)
    dx[rows, y] -= np.sum(margins > 0, axis=1)
    dx /= num_train
    return loss, dx
def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax (cross-entropy) classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i]
      and 0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    num_train = x.shape[0]
    # Shift by the row max before exponentiating for numerical stability.
    shifted = x - np.max(x, axis=1, keepdims=True)
    probs = np.exp(shifted)
    probs /= np.sum(probs, axis=1, keepdims=True)
    rows = np.arange(num_train)
    loss = -np.sum(np.log(probs[rows, y])) / num_train
    # d(loss)/d(scores) = softmax - one_hot(y), averaged over the batch.
    dx = probs.copy()
    dx[rows, y] -= 1
    dx /= num_train
    return loss, dx
|
5hubh4m/CS231n
|
Assignment2/cs231n/layers.py
|
Python
|
mit
| 17,409
|
[
"NEURON"
] |
bed646f539feadbcc90c9f69b46d735e773149ece387cc82f6582242e6aa673f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible tooling to classify this module's support level.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
# Module documentation rendered by ansible-doc; fixes the "decend" typo.
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (@bcoca)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
    - Return a list of files based on specific criteria. Multiple criteria are AND'd together.
    - For Windows targets, use the M(win_find) module instead.
options:
  age:
    description:
      - Select files whose age is equal to or greater than the specified time.
      - Use a negative age to find files equal to or less than the specified time.
      - You can choose seconds, minutes, hours, days, or weeks by specifying the
        first letter of any of those words (e.g., "1w").
    type: str
  patterns:
    default: '*'
    description:
      - One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
      - The patterns restrict the list of files to be returned to those whose basenames match at
        least one of the patterns specified. Multiple patterns can be specified using a list.
      - The pattern is matched against the file base name, excluding the directory.
      - When using regexen, the pattern MUST match the ENTIRE file name, not just parts of it. So
        if you are looking to match all files ending in .default, you'd need to use '.*\.default'
        as a regexp and not just '\.default'.
      - This parameter expects a list, which can be either comma separated or YAML. If any of the
        patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
        in undesirable ways.
    type: list
    aliases: [ pattern ]
  excludes:
    description:
      - One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
      - Items whose basenames match an C(excludes) pattern are culled from C(patterns) matches.
        Multiple patterns can be specified using a list.
    type: list
    aliases: [ exclude ]
    version_added: "2.5"
  contains:
    description:
      - One or more regex patterns which should be matched against the file content.
    type: str
  paths:
    description:
      - List of paths of directories to search. All paths must be fully qualified.
    type: list
    required: true
    aliases: [ name, path ]
  file_type:
    description:
      - Type of file to select.
      - The 'link' and 'any' choices were added in Ansible 2.3.
    type: str
    choices: [ any, directory, file, link ]
    default: file
  recurse:
    description:
      - If target is a directory, recursively descend into the directory looking for files.
    type: bool
    default: no
  size:
    description:
      - Select files whose size is equal to or greater than the specified size.
      - Use a negative size to find files equal to or less than the specified size.
      - Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
        bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
      - Size is not evaluated for directories.
  age_stamp:
    description:
      - Choose the file property against which we compare age.
    type: str
    choices: [ atime, ctime, mtime ]
    default: mtime
  hidden:
    description:
      - Set this to C(yes) to include hidden files, otherwise they will be ignored.
    type: bool
    default: no
  follow:
    description:
      - Set this to C(yes) to follow symlinks in path for systems with python 2.6+.
    type: bool
    default: no
  get_checksum:
    description:
      - Set this to C(yes) to retrieve a file's SHA1 checksum.
    type: bool
    default: no
  use_regex:
    description:
      - If C(no), the patterns are file globs (shell).
      - If C(yes), they are python regexes.
    type: bool
    default: no
  depth:
    description:
      - Set the maximum number of levels to descend into.
      - Setting recurse to C(no) will override this value, which is effectively depth 1.
      - Default is unlimited depth.
    type: int
    version_added: "2.6"
seealso:
- module: win_find
'''
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
- name: Find /var/log all directories, exclude nginx and mysql
find:
paths: /var/log
recurse: no
file_type: directory
excludes: 'nginx,mysql'
# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
- name: Use a single pattern that contains a comma formatted as a list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns: ['^_[0-9]{2,4}_.*.log$']
- name: Use multiple patterns that contain a comma formatted as a YAML list
find:
paths: /var/log
file_type: file
use_regex: yes
patterns:
- '^_[0-9]{2,4}_.*.log$'
- '^[a-z]{1,5}_.*log$'
'''
# Documented return values, rendered by ansible-doc.
# `matched` and `examined` are documented as int: main() passes
# matched=len(filelist) and examined=looked (both integers) to exit_json.
RETURN = r'''
files:
    description: All matches found with the specified criteria (see stat module for full output of each dictionary)
    returned: success
    type: list
    sample: [
        { path: "/var/tmp/test1",
          mode: "0644",
          "...": "...",
          checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
        },
        { path: "/var/tmp/test2",
          "...": "..."
        },
        ]
matched:
    description: Number of matches
    returned: success
    type: int
    sample: 14
examined:
    description: Number of filesystem objects looked at
    returned: success
    type: int
    sample: 34
'''
import fnmatch
import grp
import os
import pwd
import re
import stat
import sys
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, excludes=None, use_regex=False):
    '''Return True if basename f passes the pattern/exclude filters.'''
    if patterns is None and excludes is None:
        return True
    # A single matcher callable keeps the include/exclude logic in one place
    # for both the regex and the glob flavors.
    if use_regex:
        def matches(pat):
            return re.compile(pat).match(f)
    else:
        def matches(pat):
            return fnmatch.fnmatch(f, pat)
    if patterns and excludes is None:
        for p in patterns:
            if matches(p):
                return True
    elif patterns and excludes:
        for p in patterns:
            if matches(p):
                # Included -- but an exclude match culls it again.
                for e in excludes:
                    if matches(e):
                        return False
                return True
    return False
def agefilter(st, now, age, timestamp):
    '''Filter files older than age (or newer, when age is negative).

    st: a stat result; now: current epoch seconds; age: threshold in seconds
    or None to accept everything; timestamp: 'atime', 'ctime' or 'mtime'.
    '''
    if age is None:
        return True
    # getattr is the idiomatic replacement for st.__getattribute__(...).
    file_age = now - getattr(st, "st_%s" % timestamp)
    if age >= 0:
        return file_age >= abs(age)
    return file_age <= abs(age)
def sizefilter(st, size):
    '''Filter files greater than size (or smaller, when size is negative).'''
    if size is None:
        return True
    threshold = abs(size)
    # Positive size means "at least this big", negative "at most this big".
    if size >= 0:
        return st.st_size >= threshold
    return st.st_size <= threshold
def contentfilter(fsname, pattern):
    """
    Filter files which contain the given expression.

    :arg fsname: Filename to scan for lines matching a pattern
    :arg pattern: Pattern to look for inside of line (matched from the
        start of each line, per re.match)
    :rtype: bool
    :returns: True if one of the lines in fsname matches the pattern.
        Otherwise False
    """
    if pattern is None:
        return True
    matcher = re.compile(pattern)
    try:
        with open(fsname) as handle:
            for line in handle:
                if matcher.match(line):
                    return True
    except Exception:
        # Deliberately best-effort: unreadable/binary files are simply
        # treated as non-matches rather than failing the module.
        pass
    return False
def statinfo(st):
    '''Expand a stat result into the flat dict reported per matched file.'''
    # Resolve owner/group names best-effort; ids without a passwd/group
    # entry simply report an empty string.
    owner = ""
    group = ""
    try:
        owner = pwd.getpwuid(st.st_uid).pw_name
    except Exception:
        pass
    try:
        group = grp.getgrgid(st.st_gid).gr_name
    except Exception:
        pass
    mode = st.st_mode
    return {
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
        'gr_name': group,
        'pw_name': owner,
        'wusr': bool(mode & stat.S_IWUSR),
        'rusr': bool(mode & stat.S_IRUSR),
        'xusr': bool(mode & stat.S_IXUSR),
        'wgrp': bool(mode & stat.S_IWGRP),
        'rgrp': bool(mode & stat.S_IRGRP),
        'xgrp': bool(mode & stat.S_IXGRP),
        'woth': bool(mode & stat.S_IWOTH),
        'roth': bool(mode & stat.S_IROTH),
        'xoth': bool(mode & stat.S_IXOTH),
        'isuid': bool(mode & stat.S_ISUID),
        'isgid': bool(mode & stat.S_ISGID),
    }
def main():
    """Entry point: walk the requested paths and report matching objects."""
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(type='list', required=True, aliases=['name', 'path']),
            patterns=dict(type='list', default=['*'], aliases=['pattern']),
            excludes=dict(type='list', aliases=['exclude']),
            contains=dict(type='str'),
            file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
            age=dict(type='str'),
            age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
            size=dict(type='str'),
            recurse=dict(type='bool', default=False),
            hidden=dict(type='bool', default=False),
            follow=dict(type='bool', default=False),
            get_checksum=dict(type='bool', default=False),
            use_regex=dict(type='bool', default=False),
            depth=dict(type='int'),
        ),
        # The module only reads the filesystem, so check mode is trivially safe.
        supports_check_mode=True,
    )
    params = module.params
    filelist = []
    if params['age'] is None:
        age = None
    else:
        # convert age to seconds:
        m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            # A missing unit suffix falls back to seconds via get()'s default.
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")
    if params['size'] is None:
        size = None
    else:
        # convert size to bytes:
        m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")
    now = time.time()
    msg = ''
    looked = 0
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            ''' ignore followlinks for python version < 2.6 '''
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or os.walk(npath, followlinks=params['follow']):
                # NOTE(review): depth=0 (or None) disables the limit here,
                # since 0 is falsy -- presumably intentional; confirm.
                if params['depth']:
                    # Depth = number of path separators below the start path.
                    depth = root.replace(npath.rstrip(os.path.sep), '').count(os.path.sep)
                    if files or dirs:
                        depth += 1
                    if depth > params['depth']:
                        # Prune the walk: clearing dirs stops os.walk descending.
                        del(dirs[:])
                        continue
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))
                    # Hidden entries (dot-prefixed) are skipped unless requested.
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue
                    try:
                        # lstat: report on symlinks themselves, not their targets.
                        st = os.lstat(fsname)
                    except Exception:
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue
                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            if stat.S_ISREG(st.st_mode) and params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        # Size and content filters apply to regular files only.
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and contentfilter(fsname, params['contains']):
                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                if not params['recurse']:
                    # Without recurse, only the top level of each path is examined.
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
t794104/ansible
|
lib/ansible/modules/files/find.py
|
Python
|
gpl-3.0
| 16,464
|
[
"Brian"
] |
a7fd8b74172aed9207f417fc54c4ee7f02650d3cb5f7c71927fa2cbf8b4251ee
|
import base64
import os
import re
import shutil
import subprocess
import tempfile
import urllib
from contextlib import contextmanager
from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Union
from unittest import TestResult, mock
import lxml.html
import orjson
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.state import StateApps
from django.db.utils import IntegrityError
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from django.test.client import BOUNDARY, MULTIPART_CONTENT, encode_multipart
from django.test.testcases import SerializeMixin
from django.urls import resolve
from django.utils import translation
from fakeldap import MockLDAP
from two_factor.models import PhoneDevice
from zerver.decorator import do_two_factor_login
from zerver.lib.actions import (
bulk_add_subscriptions,
bulk_remove_subscriptions,
check_send_message,
check_send_stream_message,
gather_subscriptions,
)
from zerver.lib.cache import bounce_key_prefix_for_testing
from zerver.lib.initial_password import initial_password
from zerver.lib.rate_limiter import bounce_redis_key_prefix_for_testing
from zerver.lib.sessions import get_session_dict_user
from zerver.lib.stream_subscription import get_stream_subscriptions_for_user
from zerver.lib.streams import (
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
)
from zerver.lib.test_console_output import (
ExtraConsoleOutputFinder,
ExtraConsoleOutputInTestException,
TeeStderrAndFindExtraConsoleOutput,
TeeStdoutAndFindExtraConsoleOutput,
)
from zerver.lib.test_helpers import find_key_by_email, instrument_url
from zerver.lib.users import get_api_key
from zerver.lib.validator import check_string
from zerver.lib.webhooks.common import get_fixture_http_headers, standardize_headers
from zerver.models import (
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserProfile,
clear_supported_auth_backends_cache,
flush_per_request_caches,
get_client,
get_display_recipient,
get_realm,
get_realm_stream,
get_stream,
get_system_bot,
get_user,
get_user_by_delivery_email,
)
from zerver.openapi.openapi import validate_against_openapi_schema, validate_request
from zerver.tornado.event_queue import clear_client_event_queues_for_testing
if settings.ZILENCER_ENABLED:
from zilencer.models import get_remote_server_by_uuid
class UploadSerializeMixin(SerializeMixin):
    """
    We cannot use override_settings to change upload directory because
    settings.LOCAL_UPLOADS_DIR is used in URL pattern and URLs
    are compiled only once. Otherwise using a different upload directory
    for conflicting test cases would have provided better performance
    while providing the required isolation.
    """
    # Shared lockfile through which SerializeMixin serializes these tests,
    # so upload tests never run concurrently against the upload directory.
    lockfile = 'var/upload_lock'
    @classmethod
    def setUpClass(cls: Any, *args: Any, **kwargs: Any) -> None:
        # Create the lockfile on first use -- presumably SerializeMixin
        # expects it to already exist; confirm against its implementation.
        if not os.path.exists(cls.lockfile):
            with open(cls.lockfile, 'w'):  # nocoverage - rare locking case
                pass
        super().setUpClass(*args, **kwargs)
class ZulipTestCase(TestCase):
# Ensure that the test system just shows us diffs
maxDiff: Optional[int] = None
def setUp(self) -> None:
super().setUp()
self.API_KEYS: Dict[str, str] = {}
test_name = self.id()
bounce_key_prefix_for_testing(test_name)
bounce_redis_key_prefix_for_testing(test_name)
    def tearDown(self) -> None:
        """Reset process-global state this test may have mutated."""
        super().tearDown()
        # Important: we need to clear event queues to avoid leaking data to future tests.
        clear_client_event_queues_for_testing()
        clear_supported_auth_backends_cache()
        flush_per_request_caches()
        # Tests may switch the active translation; restore the default locale.
        translation.activate(settings.LANGUAGE_CODE)
        # Clean up after using fakeldap in LDAP tests:
        if hasattr(self, 'mock_ldap') and hasattr(self, 'mock_initialize'):
            if self.mock_ldap is not None:
                self.mock_ldap.reset()
            self.mock_initialize.stop()
    def run(self, result: Optional[TestResult]=None) -> Optional[TestResult]:  # nocoverage
        """Run the test; optionally fail it if it writes to stdout/stderr."""
        if not settings.BAN_CONSOLE_OUTPUT:
            return super().run(result)
        # Tee stdout/stderr so the test still produces its output while we
        # scan it for anything unexpected.
        extra_output_finder = ExtraConsoleOutputFinder()
        with TeeStderrAndFindExtraConsoleOutput(extra_output_finder), TeeStdoutAndFindExtraConsoleOutput(extra_output_finder):
            test_result = super().run(result)
        if extra_output_finder.full_extra_output:
            exception_message = f"""
---- UNEXPECTED CONSOLE OUTPUT DETECTED ----
To ensure that we never miss important error output/warnings,
we require test-backend to have clean console output.
This message usually is triggered by forgotten debugging print()
statements or new logging statements. For the latter, you can
use `with self.assertLogs()` to capture and verify the log output;
use `git grep assertLogs` to see dozens of correct examples.
You should be able to quickly reproduce this failure with:
test-backend --ban-console-output {self.id()}
Output:
{extra_output_finder.full_extra_output}
--------------------------------------------
"""
            raise ExtraConsoleOutputInTestException(exception_message)
        return test_result
'''
WRAPPER_COMMENT:
We wrap calls to self.client.{patch,put,get,post,delete} for various
reasons. Some of this has to do with fixing encodings before calling
into the Django code. Some of this has to do with providing a future
path for instrumentation. Some of it's just consistency.
The linter will prevent direct calls to self.client.foo, so the wrapper
functions have to fake out the linter by using a local variable called
django_client to fool the regext.
'''
DEFAULT_SUBDOMAIN = "zulip"
TOKENIZED_NOREPLY_REGEX = settings.TOKENIZED_NOREPLY_EMAIL_ADDRESS.format(token="[a-z0-9_]{24}")
def set_http_headers(self, kwargs: Dict[str, Any]) -> None:
if 'subdomain' in kwargs:
kwargs['HTTP_HOST'] = Realm.host_for_subdomain(kwargs['subdomain'])
del kwargs['subdomain']
elif 'HTTP_HOST' not in kwargs:
kwargs['HTTP_HOST'] = Realm.host_for_subdomain(self.DEFAULT_SUBDOMAIN)
# set User-Agent
if 'HTTP_AUTHORIZATION' in kwargs:
# An API request; use mobile as the default user agent
default_user_agent = "ZulipMobile/26.22.145 (iOS 10.3.1)"
else:
# A webapp request; use a browser User-Agent string.
default_user_agent = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) " +
"AppleWebKit/537.36 (KHTML, like Gecko) " +
"Chrome/79.0.3945.130 Safari/537.36")
if kwargs.get('skip_user_agent'):
# Provide a way to disable setting User-Agent if desired.
assert 'HTTP_USER_AGENT' not in kwargs
del kwargs['skip_user_agent']
elif 'HTTP_USER_AGENT' not in kwargs:
kwargs['HTTP_USER_AGENT'] = default_user_agent
def extract_api_suffix_url(self, url: str) -> Tuple[str, Dict[str, Any]]:
"""
Function that extracts the URL after `/api/v1` or `/json` and also
returns the query data in the URL, if there is any.
"""
url_split = url.split('?')
data: Dict[str, Any] = {}
if len(url_split) == 2:
data = urllib.parse.parse_qs(url_split[1])
url = url_split[0]
url = url.replace("/json/", "/").replace("/api/v1/", "/")
return (url, data)
    def validate_api_response_openapi(
        self,
        url: str,
        method: str,
        result: HttpResponse,
        data: Union[str, bytes, Dict[str, Any]],
        http_headers: Dict[str, Any],
        intentionally_undocumented: bool = False,
    ) -> None:
        """
        Validates all API responses received by this test against Zulip's API documentation,
        declared in zerver/openapi/zulip.yaml. This powerful test lets us use Zulip's
        extensive test coverage of corner cases in the API to ensure that we've properly
        documented those corner cases.
        """
        # Only API endpoints have OpenAPI documentation to validate against.
        if not (url.startswith("/json") or url.startswith("/api/v1")):
            return
        # Non-JSON responses (e.g. file downloads) cannot be schema-checked.
        try:
            content = orjson.loads(result.content)
        except orjson.JSONDecodeError:
            return
        json_url = False
        if url.startswith('/json'):
            json_url = True
        url, query_data = self.extract_api_suffix_url(url)
        if len(query_data) != 0:
            # In some cases the query parameters are defined in the URL itself. In such cases
            # The `data` argument of our function is not used. Hence get `data` argument
            # from url.
            data = query_data
        # Validate the request only when the response itself validated, so a
        # single schema problem is reported once.
        response_validated = validate_against_openapi_schema(content, url, method, str(result.status_code))
        if response_validated:
            validate_request(url, method, data, http_headers, json_url, str(result.status_code),
                             intentionally_undocumented=intentionally_undocumented)
# Thin wrappers around Django's test client, one per HTTP verb.  Each one
# urlencodes the payload (Django only does this automatically for POST),
# applies our standard test headers, and -- where an OpenAPI-documented
# response is expected -- validates it against the schema.

@instrument_url
def client_patch(self, url: str, info: Dict[str, Any]={}, intentionally_undocumented: bool=False, **kwargs: Any) -> HttpResponse:
    """
    We need to urlencode, since Django's function won't do it for us.
    """
    encoded = urllib.parse.urlencode(info)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    result = django_client.patch(url, encoded, **kwargs)
    self.validate_api_response_openapi(url, "patch", result, info, kwargs, intentionally_undocumented=intentionally_undocumented)
    return result

@instrument_url
def client_patch_multipart(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
    """
    Use this for patch requests that have file uploads or
    that need some sort of multi-part content.  In the future
    Django's test client may become a bit more flexible,
    so we can hopefully eliminate this.  (When you post
    with the Django test client, it deals with MULTIPART_CONTENT
    automatically, but not patch.)
    """
    encoded = encode_multipart(BOUNDARY, info)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    result = django_client.patch(
        url,
        encoded,
        content_type=MULTIPART_CONTENT,
        **kwargs)
    self.validate_api_response_openapi(url, "patch", result, info, kwargs)
    return result

@instrument_url
def client_put(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
    # PUT responses are not OpenAPI-validated (Zulip's API doesn't use PUT).
    encoded = urllib.parse.urlencode(info)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    return django_client.put(url, encoded, **kwargs)

@instrument_url
def client_delete(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
    encoded = urllib.parse.urlencode(info)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    result = django_client.delete(url, encoded, **kwargs)
    self.validate_api_response_openapi(url, "delete", result, info, kwargs)
    return result

@instrument_url
def client_options(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
    encoded = urllib.parse.urlencode(info)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    return django_client.options(url, encoded, **kwargs)

@instrument_url
def client_head(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
    encoded = urllib.parse.urlencode(info)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    return django_client.head(url, encoded, **kwargs)

@instrument_url
def client_post(
    self,
    url: str,
    info: Union[str, bytes, Dict[str, Any]] = {},
    **kwargs: Any,
) -> HttpResponse:
    # Pop the validation flag so it doesn't leak into Django's client kwargs.
    intentionally_undocumented: bool = kwargs.pop("intentionally_undocumented", False)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    result = django_client.post(url, info, **kwargs)
    self.validate_api_response_openapi(url, "post", result, info, kwargs, intentionally_undocumented=intentionally_undocumented)
    return result

@instrument_url
def client_post_request(self, url: str, req: Any) -> HttpResponse:
    """
    We simulate hitting an endpoint here, although we
    actually resolve the URL manually and hit the view
    directly.  We have this helper method to allow our
    instrumentation to work for /notify_tornado and
    future similar methods that require doing funny
    things to a request object.
    """
    match = resolve(url)
    return match.func(req)

@instrument_url
def client_get(self, url: str, info: Dict[str, Any] = {}, **kwargs: Any) -> HttpResponse:
    intentionally_undocumented: bool = kwargs.pop("intentionally_undocumented", False)
    django_client = self.client # see WRAPPER_COMMENT
    self.set_http_headers(kwargs)
    result = django_client.get(url, info, **kwargs)
    self.validate_api_response_openapi(url, "get", result, info, kwargs, intentionally_undocumented=intentionally_undocumented)
    return result
# Canonical email addresses for the standard (registered) users in the
# "zulip" test realm, keyed by the short name tests use to refer to them.
example_user_map = dict(
    hamlet='hamlet@zulip.com',
    cordelia='cordelia@zulip.com',
    iago='iago@zulip.com',
    prospero='prospero@zulip.com',
    othello='othello@zulip.com',
    AARON='AARON@zulip.com',
    aaron='aaron@zulip.com',
    ZOE='ZOE@zulip.com',
    polonius='polonius@zulip.com',
    desdemona='desdemona@zulip.com',
    webhook_bot='webhook-bot@zulip.com',
    welcome_bot='welcome-bot@zulip.com',
    outgoing_webhook_bot='outgoing-webhook@zulip.com',
    default_bot='default-bot@zulip.com',
)

# Users in the MIT (zephyr mirror) test realm.
mit_user_map = dict(
    sipbtest="sipbtest@mit.edu",
    starnine="starnine@mit.edu",
    espuser="espuser@mit.edu",
)

# Users in the "lear" test realm.
lear_user_map = dict(
    cordelia="cordelia@zulip.com",
    king="king@lear.org",
)

# Non-registered test users
nonreg_user_map = dict(
    test='test@zulip.com',
    test1='test1@zulip.com',
    alice='alice@zulip.com',
    newuser='newuser@zulip.com',
    bob='bob@zulip.com',
    cordelia='cordelia@zulip.com',
    newguy='newguy@zulip.com',
    me='me@zulip.com',
)

# Maps Zulip short names to uids in the LDAP test directory
# (zerver/tests/fixtures/ldap/directory.json).
example_user_ldap_username_map = dict(
    hamlet='hamlet',
    cordelia='cordelia',
    # aaron's uid in our test directory is "letham".
    aaron='letham',
)
def nonreg_user(self, name: str) -> UserProfile:
    """Fetch the UserProfile for a not-yet-registered test account."""
    return get_user_by_delivery_email(self.nonreg_user_map[name], get_realm("zulip"))

def example_user(self, name: str) -> UserProfile:
    """Fetch the UserProfile for one of the standard example users."""
    return get_user_by_delivery_email(self.example_user_map[name], get_realm('zulip'))

def mit_user(self, name: str) -> UserProfile:
    """Fetch a user in the MIT (zephyr mirror) realm."""
    return get_user(self.mit_user_map[name], get_realm('zephyr'))

def lear_user(self, name: str) -> UserProfile:
    """Fetch a user in the lear realm."""
    return get_user(self.lear_user_map[name], get_realm('lear'))

def nonreg_email(self, name: str) -> str:
    """Email address of a non-registered test user."""
    return self.nonreg_user_map[name]

def example_email(self, name: str) -> str:
    """Email address of a standard example user."""
    return self.example_user_map[name]

def mit_email(self, name: str) -> str:
    """Email address of an MIT realm test user."""
    return self.mit_user_map[name]

def notification_bot(self) -> UserProfile:
    """The system notification bot."""
    return get_system_bot(settings.NOTIFICATION_BOT)
def create_test_bot(self, short_name: str,
                    user_profile: UserProfile,
                    full_name: str='Foo Bot',
                    **extras: Any) -> UserProfile:
    """
    Create a bot via the /json/bots endpoint (as `user_profile`) and
    return the resulting bot's UserProfile.  Extra POST fields can be
    passed via **extras.
    """
    self.login_user(user_profile)
    bot_info = {
        'short_name': short_name,
        'full_name': full_name,
    }
    bot_info.update(extras)
    result = self.client_post("/json/bots", bot_info)
    self.assert_json_success(result)
    # Bots get a generated email of the form <short_name>-bot@<realm host>.
    bot_email = f'{short_name}-bot@zulip.testserver'
    bot_profile = get_user(bot_email, user_profile.realm)
    return bot_profile

def fail_to_create_test_bot(
    self, short_name: str,
    user_profile: UserProfile,
    full_name: str='Foo Bot',
    *,
    assert_json_error_msg: str,
    **extras: Any,
) -> None:
    """
    Attempt to create a bot and assert the request fails with the given
    JSON error message.
    """
    self.login_user(user_profile)
    bot_info = {
        'short_name': short_name,
        'full_name': full_name,
    }
    bot_info.update(extras)
    result = self.client_post("/json/bots", bot_info)
    self.assert_json_error(result, assert_json_error_msg)
def _get_page_params(self, result: HttpResponse) -> Dict[str, Any]:
    """Helper for parsing page_params after fetching the webapp's home view."""
    doc = lxml.html.document_fromstring(result.content)
    # The server embeds page_params as JSON in a data attribute of a
    # dedicated div; [div] asserts there is exactly one such element.
    [div] = doc.xpath("//div[@id='page-params']")
    page_params_json = div.get("data-params")
    page_params = orjson.loads(page_params_json)
    return page_params

def check_rendered_logged_in_app(self, result: HttpResponse) -> None:
    """Verifies that a visit of / was a 200 that rendered page_params
    and not for a logged-out web-public visitor."""
    self.assertEqual(result.status_code, 200)
    page_params = self._get_page_params(result)
    # It is important to check `is_web_public_visitor` to verify
    # that we treated this request as a normal logged-in session,
    # not as a web-public visitor.
    self.assertEqual(page_params['is_web_public_visitor'], False)

def check_rendered_web_public_visitor(self, result: HttpResponse) -> None:
    """Verifies that a visit of / was a 200 that rendered page_params
    for a logged-out web-public visitor."""
    self.assertEqual(result.status_code, 200)
    page_params = self._get_page_params(result)
    # It is important to check `is_web_public_visitor` to verify
    # that we treated this request to render for a `web_public_visitor`
    self.assertEqual(page_params['is_web_public_visitor'], True)
def login_with_return(self, email: str, password: Optional[str]=None,
                      **kwargs: Any) -> HttpResponse:
    """
    POST to the login form and return the HttpResponse (so callers can
    inspect redirects/errors).  If no password is given, the standard
    test password for the email is used.
    """
    if password is None:
        password = initial_password(email)
    result = self.client_post('/accounts/login/',
                              {'username': email, 'password': password},
                              **kwargs)
    # A 500 would indicate a server bug rather than a login failure.
    self.assertNotEqual(result.status_code, 500)
    return result

def login(self, name: str) -> None:
    '''
    Use this for really simple tests where you just need
    to be logged in as some user, but don't need the actual
    user object for anything else.  Try to use 'hamlet' for
    non-admins and 'iago' for admins:

        self.login('hamlet')

    Try to use 'cordelia' or 'othello' as "other" users.
    '''
    assert '@' not in name, 'use login_by_email for email logins'
    user = self.example_user(name)
    self.login_user(user)

def login_by_email(self,
                   email: str,
                   password: str) -> None:
    """Log in via Django's session backend, asserting success."""
    realm = get_realm("zulip")
    request = HttpRequest()
    request.session = self.client.session
    self.assertTrue(
        self.client.login(
            request=request,
            username=email,
            password=password,
            realm=realm,
        ),
    )

def assert_login_failure(self,
                         email: str,
                         password: str) -> None:
    """Assert that the given credentials do NOT log in successfully."""
    realm = get_realm("zulip")
    self.assertFalse(
        self.client.login(
            username=email,
            password=password,
            realm=realm,
        ),
    )
def login_user(self, user_profile: UserProfile) -> None:
    """Log the test client in as user_profile using its standard test password."""
    email = user_profile.delivery_email
    realm = user_profile.realm
    password = initial_password(email)
    request = HttpRequest()
    request.session = self.client.session
    self.assertTrue(self.client.login(request=request, username=email, password=password,
                                      realm=realm))

def login_2fa(self, user_profile: UserProfile) -> None:
    """
    We need this function to call request.session.save().
    do_two_factor_login doesn't save session; in normal request-response
    cycle this doesn't matter because middleware will save the session
    when it finds it dirty; however,in tests we will have to do that
    explicitly.
    """
    request = HttpRequest()
    request.session = self.client.session
    request.user = user_profile
    do_two_factor_login(request, user_profile)
    request.session.save()

def logout(self) -> None:
    # Clear the test client's session.
    self.client.logout()

def register(self, email: str, password: str, **kwargs: Any) -> HttpResponse:
    """Run the full two-step registration flow for a new account."""
    self.client_post('/accounts/home/', {'email': email},
                     **kwargs)
    return self.submit_reg_form_for_user(email, password, **kwargs)
def submit_reg_form_for_user(
    self, email: str, password: str,
    realm_name: str="Zulip Test",
    realm_subdomain: str="zuliptest",
    from_confirmation: str='', full_name: Optional[str]=None,
    timezone: str='', realm_in_root_domain: Optional[str]=None,
    default_stream_groups: Sequence[str]=[],
    source_realm: str='',
    key: Optional[str]=None, **kwargs: Any) -> HttpResponse:
    """
    Stage two of the two-step registration process.

    If things are working correctly the account should be fully
    registered after this call.

    You can pass the HTTP_HOST variable for subdomains via kwargs.
    """
    if full_name is None:
        # Derive a placeholder full name from the email address.
        full_name = email.replace("@", "_")
    payload = {
        'full_name': full_name,
        'password': password,
        'realm_name': realm_name,
        'realm_subdomain': realm_subdomain,
        # The confirmation key is normally scraped from the outgoing email.
        'key': key if key is not None else find_key_by_email(email),
        'timezone': timezone,
        'terms': True,
        'from_confirmation': from_confirmation,
        'default_stream_group': default_stream_groups,
        'source_realm': source_realm,
    }
    if realm_in_root_domain is not None:
        payload['realm_in_root_domain'] = realm_in_root_domain
    return self.client_post('/accounts/register/', payload, **kwargs)
def get_confirmation_url_from_outbox(
    self,
    email_address: str,
    *,
    url_pattern: Optional[str]=None,
) -> str:
    """
    Scan Django's test email outbox (most recent first) for a message
    addressed to email_address and extract the confirmation URL from its
    body.  Raises AssertionError if no matching email is found.
    """
    from django.core.mail import outbox
    if url_pattern is None:
        # This is a bit of a crude heuristic, but good enough for most tests.
        url_pattern = settings.EXTERNAL_HOST + r"(\S+)>"
    for message in reversed(outbox):
        if any(
            addr == email_address or addr.endswith(f" <{email_address}>")
            for addr in message.to
        ):
            match = re.search(url_pattern, message.body)
            assert match is not None
            [confirmation_url] = match.groups()
            return confirmation_url
    else:
        # for/else: runs only if the loop completed without returning.
        raise AssertionError("Couldn't find a confirmation email.")
def encode_uuid(self, uuid: str) -> str:
    """
    Return an HTTP basic auth header value for the remote Zulip server
    identified by `uuid`, caching its API key in self.API_KEYS.

    (NOTE(review): the original docstring here described the parameter as
    "an email or a remote server uuid", which is the contract of
    encode_credentials, not of this method.)
    """
    if uuid in self.API_KEYS:
        api_key = self.API_KEYS[uuid]
    else:
        api_key = get_remote_server_by_uuid(uuid).api_key
        self.API_KEYS[uuid] = api_key
    return self.encode_credentials(uuid, api_key)
def encode_user(self, user: UserProfile) -> str:
    """Build an HTTP basic auth header value from a user's email and API key."""
    return self.encode_credentials(user.delivery_email, user.api_key)

def encode_email(self, email: str, realm: str="zulip") -> str:
    # TODO: use encode_user where possible
    assert '@' in email
    profile = get_user_by_delivery_email(email, get_realm(realm))
    return self.encode_credentials(email, get_api_key(profile))
def encode_credentials(self, identifier: str, api_key: str) -> str:
    """
    identifier: Can be an email or a remote server uuid.

    Returns the value for an HTTP Basic Authorization header.
    """
    token = f"{identifier}:{api_key}".encode('utf-8')
    return 'Basic ' + base64.b64encode(token).decode('utf-8')
# Convenience wrappers that attach an HTTP basic auth header and then
# delegate to the corresponding client_* method.

def uuid_get(self, identifier: str, *args: Any, **kwargs: Any) -> HttpResponse:
    kwargs['HTTP_AUTHORIZATION'] = self.encode_uuid(identifier)
    return self.client_get(*args, **kwargs)

def uuid_post(self, identifier: str, *args: Any, **kwargs: Any) -> HttpResponse:
    kwargs['HTTP_AUTHORIZATION'] = self.encode_uuid(identifier)
    return self.client_post(*args, **kwargs)

def api_get(self, user: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:
    kwargs['HTTP_AUTHORIZATION'] = self.encode_user(user)
    return self.client_get(*args, **kwargs)

def api_post(self, user: UserProfile, *args: Any, intentionally_undocumented: bool=False, **kwargs: Any) -> HttpResponse:
    kwargs['HTTP_AUTHORIZATION'] = self.encode_user(user)
    return self.client_post(*args, intentionally_undocumented=intentionally_undocumented, **kwargs)

def api_patch(self, user: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:
    kwargs['HTTP_AUTHORIZATION'] = self.encode_user(user)
    return self.client_patch(*args, **kwargs)

def api_delete(self, user: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:
    kwargs['HTTP_AUTHORIZATION'] = self.encode_user(user)
    return self.client_delete(*args, **kwargs)
def get_streams(self, user_profile: UserProfile) -> List[str]:
    """
    Helper function to get the stream names for a user
    """
    subs = get_stream_subscriptions_for_user(user_profile).filter(
        active=True,
    )
    # check_string asserts each display recipient is a plain string
    # (stream names, as opposed to huddle recipient lists).
    return [
        check_string("recipient", get_display_recipient(sub.recipient))
        for sub in subs
    ]
def send_personal_message(self, from_user: UserProfile, to_user: UserProfile, content: str="test content",
                          sending_client_name: str="test suite") -> int:
    """Send a 1:1 private message and return the new message's id."""
    recipient_list = [to_user.id]
    (sending_client, _) = Client.objects.get_or_create(name=sending_client_name)

    return check_send_message(
        from_user, sending_client, 'private', recipient_list, None,
        content,
    )

def send_huddle_message(self,
                        from_user: UserProfile,
                        to_users: List[UserProfile],
                        content: str="test content",
                        sending_client_name: str="test suite") -> int:
    """Send a group private message (huddle) and return the message id."""
    to_user_ids = [u.id for u in to_users]
    # A huddle requires at least two recipients besides the sender.
    assert(len(to_user_ids) >= 2)
    (sending_client, _) = Client.objects.get_or_create(name=sending_client_name)

    return check_send_message(
        from_user, sending_client, 'private', to_user_ids, None,
        content,
    )

def send_stream_message(self, sender: UserProfile, stream_name: str, content: str="test content",
                        topic_name: str="test",
                        recipient_realm: Optional[Realm]=None,
                        sending_client_name: str="test suite") -> int:
    """Send a message to a stream and return the message id."""
    (sending_client, _) = Client.objects.get_or_create(name=sending_client_name)

    return check_send_stream_message(
        sender=sender,
        client=sending_client,
        stream_name=stream_name,
        topic=topic_name,
        body=content,
        realm=recipient_realm,
    )
def get_messages_response(self, anchor: Union[int, str]=1,
                          num_before: int=100, num_after: int=100,
                          use_first_unread_anchor: bool=False) -> Dict[str, List[Dict[str, Any]]]:
    """Fetch messages via GET /json/messages and return the parsed JSON body."""
    post_params = {"anchor": anchor, "num_before": num_before,
                   "num_after": num_after,
                   # booleans must be JSON-encoded for the API layer
                   "use_first_unread_anchor": orjson.dumps(use_first_unread_anchor).decode()}
    result = self.client_get("/json/messages", dict(post_params))
    data = result.json()
    return data

def get_messages(self, anchor: Union[str, int]=1, num_before: int=100, num_after: int=100,
                 use_first_unread_anchor: bool=False) -> List[Dict[str, Any]]:
    """Like get_messages_response, but return just the list of messages."""
    data = self.get_messages_response(anchor, num_before, num_after, use_first_unread_anchor)
    return data['messages']
def users_subscribed_to_stream(self, stream_name: str, realm: Realm) -> List[UserProfile]:
    """Return the UserProfiles with an active subscription to the stream."""
    stream = Stream.objects.get(name=stream_name, realm=realm)
    recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
    subscriptions = Subscription.objects.filter(recipient=recipient, active=True)

    return [subscription.user_profile for subscription in subscriptions]

def assert_url_serves_contents_of_file(self, url: str, result: bytes) -> None:
    """Assert that GETting `url` streams back exactly the bytes in `result`."""
    response = self.client_get(url)
    data = b"".join(response.streaming_content)
    self.assertEqual(result, data)
def assert_json_success(self, result: HttpResponse) -> Dict[str, Any]:
    """
    Successful POSTs return a 200 and JSON of the form {"result": "success",
    "msg": ""}.
    """
    try:
        json = orjson.loads(result.content)
    except orjson.JSONDecodeError:  # nocoverage
        json = {'msg': "Error parsing JSON in response!"}
    self.assertEqual(result.status_code, 200, json['msg'])
    self.assertEqual(json.get("result"), "success")
    # We have a msg key for consistency with errors, but it typically has an
    # empty value.
    self.assertIn("msg", json)
    self.assertNotEqual(json["msg"], "Error parsing JSON in response!")
    return json

def get_json_error(self, result: HttpResponse, status_code: int=400) -> Dict[str, Any]:
    """Assert the response is a JSON error with the given status; return its msg."""
    try:
        json = orjson.loads(result.content)
    except orjson.JSONDecodeError:  # nocoverage
        json = {'msg': "Error parsing JSON in response!"}
    self.assertEqual(result.status_code, status_code, msg=json.get('msg'))
    self.assertEqual(json.get("result"), "error")
    return json['msg']

def assert_json_error(self, result: HttpResponse, msg: str, status_code: int=400) -> None:
    """
    Invalid POSTs return an error status code and JSON of the form
    {"result": "error", "msg": "reason"}.
    """
    self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, items: List[Any], count: int) -> None:
    """Assert `items` has exactly `count` elements, dumping them on failure."""
    actual_count = len(items)
    if actual_count == count:
        return
    # nocoverage: only reached when the assertion is about to fail.
    print('ITEMS:\n')
    for item in items:
        print(item)
    print(f"\nexpected length: {count}\nactual length: {actual_count}")
    raise AssertionError('List is unexpected size!')
def assert_json_error_contains(self, result: HttpResponse, msg_substring: str,
                               status_code: int=400) -> None:
    """Assert the JSON error message contains the given substring."""
    self.assertIn(msg_substring, self.get_json_error(result, status_code=status_code))

def assert_in_response(self, substring: str, response: HttpResponse) -> None:
    """Assert the (decoded) response body contains `substring`."""
    self.assertIn(substring, response.content.decode('utf-8'))

def assert_in_success_response(self, substrings: List[str],
                               response: HttpResponse) -> None:
    """Assert a 200 response whose body contains all of `substrings`."""
    self.assertEqual(response.status_code, 200)
    decoded = response.content.decode('utf-8')
    for substring in substrings:
        self.assertIn(substring, decoded)

def assert_not_in_success_response(self, substrings: List[str],
                                   response: HttpResponse) -> None:
    """Assert a 200 response whose body contains none of `substrings`."""
    self.assertEqual(response.status_code, 200)
    decoded = response.content.decode('utf-8')
    for substring in substrings:
        self.assertNotIn(substring, decoded)

def assert_logged_in_user_id(self, user_id: Optional[int]) -> None:
    """
    Verifies the user currently logged in for the test client has the provided user_id.
    Pass None to verify no user is logged in.
    """
    self.assertEqual(get_session_dict_user(self.client.session), user_id)
def webhook_fixture_data(self, type: str, action: str, file_type: str='json') -> str:
    """Read a webhook fixture payload from zerver/webhooks/<type>/fixtures/."""
    fixture_path = os.path.join(
        os.path.dirname(__file__),
        f"../webhooks/{type}/fixtures/{action}.{file_type}",
    )
    with open(fixture_path) as f:
        return f.read()

def fixture_file_name(self, file_name: str, type: str='') -> str:
    """Path to a fixture file under zerver/tests/fixtures/<type>/."""
    return os.path.join(
        os.path.dirname(__file__),
        f"../tests/fixtures/{type}/{file_name}",
    )

def fixture_data(self, file_name: str, type: str='') -> str:
    """Read and return the contents of a test fixture file."""
    with open(self.fixture_file_name(file_name, type)) as f:
        return f.read()
def make_stream(self, stream_name: str, realm: Optional[Realm]=None,
                invite_only: bool=False,
                is_web_public: bool=False,
                history_public_to_subscribers: Optional[bool]=None) -> Stream:
    """
    Create a Stream (plus its Recipient row) directly in the database.
    Raises if a stream with that name already exists in the realm.
    """
    if realm is None:
        realm = get_realm('zulip')

    history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
        realm, invite_only, history_public_to_subscribers)

    try:
        stream = Stream.objects.create(
            realm=realm,
            name=stream_name,
            invite_only=invite_only,
            is_web_public=is_web_public,
            history_public_to_subscribers=history_public_to_subscribers,
        )
    except IntegrityError:  # nocoverage -- this is for bugs in the tests
        raise Exception(f'''
{stream_name} already exists

Please call make_stream with a stream name
that is not already in use.''')

    # Streams carry a back-reference to their Recipient row.
    recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
    stream.recipient = recipient
    stream.save(update_fields=["recipient"])
    return stream
# Sentinel stream id that is guaranteed not to exist in the test database.
INVALID_STREAM_ID = 999999

def get_stream_id(self, name: str, realm: Optional[Realm]=None) -> int:
    """Return the id of the named stream, or INVALID_STREAM_ID if absent."""
    if not realm:
        realm = get_realm('zulip')
    try:
        stream = get_realm_stream(name, realm.id)
    except Stream.DoesNotExist:
        return self.INVALID_STREAM_ID
    return stream.id
# Subscribe to a stream directly
def subscribe(self, user_profile: UserProfile, stream_name: str) -> Stream:
    """
    Subscribe user_profile to stream_name (bypassing the API), creating
    the stream if it does not exist yet.  Returns the Stream.
    """
    realm = user_profile.realm
    try:
        stream = get_stream(stream_name, user_profile.realm)
    except Stream.DoesNotExist:
        # We don't care whether the stream was freshly created here, so
        # discard the "created" flag (was bound to an unused local).
        stream, _ = create_stream_if_needed(realm, stream_name)
    bulk_add_subscriptions(realm, [stream], [user_profile])
    return stream

def unsubscribe(self, user_profile: UserProfile, stream_name: str) -> None:
    """Remove user_profile's subscription to an existing stream."""
    client = get_client("website")
    stream = get_stream(stream_name, user_profile.realm)
    bulk_remove_subscriptions([user_profile], [stream], client)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, user: UserProfile, streams: Iterable[str],
                                extra_post_data: Dict[str, Any]={}, invite_only: bool=False,
                                is_web_public: bool=False,
                                allow_fail: bool=False,
                                **kwargs: Any) -> HttpResponse:
    """
    POST to /api/v1/users/me/subscriptions as `user`.  Unless allow_fail
    is True, the response is asserted to be a JSON success.
    """
    post_data = {'subscriptions': orjson.dumps([{"name": stream} for stream in streams]).decode(),
                 'is_web_public': orjson.dumps(is_web_public).decode(),
                 'invite_only': orjson.dumps(invite_only).decode()}
    post_data.update(extra_post_data)
    result = self.api_post(user, "/api/v1/users/me/subscriptions", post_data, **kwargs)
    if not allow_fail:
        self.assert_json_success(result)
    return result

def check_user_subscribed_only_to_streams(self, user_name: str,
                                          streams: List[Stream]) -> None:
    """Assert the (non-registered-map) user's subscriptions are exactly `streams`."""
    streams = sorted(streams, key=lambda x: x.name)
    subscribed_streams = gather_subscriptions(self.nonreg_user(user_name))[0]

    self.assertEqual(len(subscribed_streams), len(streams))

    # gather_subscriptions returns the streams sorted by name as well,
    # so a pairwise comparison suffices.
    for x, y in zip(subscribed_streams, streams):
        self.assertEqual(x["name"], y.name)
def send_webhook_payload(
    self,
    user_profile: UserProfile,
    url: str,
    payload: Union[str, Dict[str, Any]],
    **post_params: Any,
) -> Message:
    """
    Send a webhook payload to the server, and verify that the
    post is successful.

    This is a pretty low-level function.  For most use cases
    see the helpers that call this function, which do additional
    checks.

    Occasionally tests will call this directly, for unique
    situations like having multiple messages go to a stream,
    where the other helper functions are a bit too rigid,
    and you'll want the test itself do various assertions.
    Even in those cases, you're often better to simply
    call client_post and assert_json_success.

    If the caller expects a message to be sent to a stream,
    the caller should make sure the user is subscribed.
    """
    # Remember the newest message so we can detect whether the webhook
    # actually wrote a new one.
    prior_msg = self.get_last_message()

    result = self.client_post(url, payload, **post_params)
    self.assert_json_success(result)

    # Check the correct message was sent
    msg = self.get_last_message()

    if msg.id == prior_msg.id:
        raise Exception('''
Your test code called an endpoint that did
not write any new messages.  It is probably
broken (but still returns 200 due to exception
handling).

One possible gotcha is that you forgot to
subscribe the test user to the stream that
the webhook sends to.
''')  # nocoverage

    self.assertEqual(msg.sender.email, user_profile.email)
    return msg
def get_last_message(self) -> Message:
    """Return the most recently sent message (raises if there are none)."""
    return Message.objects.latest('id')

def get_second_to_last_message(self) -> Message:
    """Return the second most recently sent message."""
    return Message.objects.all().order_by('-id')[1]
@contextmanager
def simulated_markdown_failure(self) -> Iterator[None]:
    '''
    This raises a failure inside of the try/except block of
    markdown.__init__.do_convert.
    '''
    with \
            self.settings(ERROR_BOT=None), \
            mock.patch('zerver.lib.markdown.timeout', side_effect=subprocess.CalledProcessError(1, [])), \
            mock.patch('zerver.lib.markdown.markdown_logger'):
        yield
def create_default_device(self, user_profile: UserProfile,
                          number: str="+12125550100") -> None:
    """Create a confirmed SMS 2FA device for the user (django-two-factor)."""
    phone_device = PhoneDevice(user=user_profile, name='default',
                               confirmed=True, number=number,
                               key='abcd', method='sms')
    phone_device.save()

def rm_tree(self, path: str) -> None:
    """Remove a directory tree if it exists (no-op otherwise)."""
    if os.path.exists(path):
        shutil.rmtree(path)

def make_import_output_dir(self, exported_from: str) -> str:
    """Create and return a fresh temp directory for import/export tests."""
    output_dir = tempfile.mkdtemp(dir=settings.TEST_WORKER_DIR,
                                  prefix="test-" + exported_from + "-import-")
    os.makedirs(output_dir, exist_ok=True)
    return output_dir
def get_set(self, data: List[Dict[str, Any]], field: str) -> Set[str]:
    """Collect the distinct values of `field` across all rows."""
    return {row[field] for row in data}

def find_by_id(self, data: List[Dict[str, Any]], db_id: int) -> Dict[str, Any]:
    """Return the first row whose 'id' equals db_id (IndexError if absent)."""
    matches = [row for row in data if row['id'] == db_id]
    return matches[0]
def init_default_ldap_database(self) -> None:
    """
    Takes care of the mock_ldap setup, loads
    a directory from zerver/tests/fixtures/ldap/directory.json with various entries
    to be used by tests.
    If a test wants to specify its own directory, it can just replace
    self.mock_ldap.directory with its own content, but in most cases it should be
    enough to use change_user_attr to make simple modifications to the pre-loaded
    directory. If new user entries are needed to test for some additional unusual
    scenario, it's most likely best to add that to directory.json.
    """
    directory = orjson.loads(self.fixture_data("directory.json", type="ldap"))

    for dn, attrs in directory.items():
        if 'uid' in attrs:
            # Generate a password for the LDAP account:
            attrs['userPassword'] = [self.ldap_password(attrs['uid'][0])]

        # Load binary attributes. If in "directory", an attribute as its value
        # has a string starting with "file:", the rest of the string is assumed
        # to be a path to the file from which binary data should be loaded,
        # as the actual value of the attribute in LDAP.
        for attr, value in attrs.items():
            if isinstance(value, str) and value.startswith("file:"):
                with open(value[5:], 'rb') as f:
                    attrs[attr] = [f.read()]

    # Patch django_auth_ldap so it talks to the in-memory MockLDAP
    # directory instead of a real LDAP server.
    ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
    self.mock_initialize = ldap_patcher.start()
    self.mock_ldap = MockLDAP(directory)
    self.mock_initialize.return_value = self.mock_ldap
def change_ldap_user_attr(self, username: str, attr_name: str, attr_value: Union[str, bytes],
                          binary: bool=False) -> None:
    """
    Method for changing the value of an attribute of a user entry in the mock
    directory. Use option binary=True if you want binary data to be loaded
    into the attribute from a file specified at attr_value. This changes
    the attribute only for the specific test function that calls this method,
    and is isolated from other tests.
    """
    dn = f"uid={username},ou=users,dc=zulip,dc=com"
    if binary:
        with open(attr_value, "rb") as f:
            # attr_value should be a path to the file with the binary data
            data: Union[str, bytes] = f.read()
    else:
        data = attr_value

    # MockLDAP stores attribute values as lists.
    self.mock_ldap.directory[dn][attr_name] = [data]
def ldap_username(self, username: str) -> str:
    """
    Maps Zulip username to the name of the corresponding LDAP user
    in our test directory at zerver/tests/fixtures/ldap/directory.json,
    if the LDAP user exists.
    """
    return self.example_user_ldap_username_map[username]

def ldap_password(self, uid: str) -> str:
    """Deterministic password generated for an LDAP test account."""
    return "{}_ldap_password".format(uid)
class WebhookTestCase(ZulipTestCase):
    """
    Common for all webhooks tests

    Override below class attributes and run send_and_test_message
    If you create your URL in uncommon way you can override build_webhook_url method
    In case that you need modify body or create it without using fixture you can also override get_body method
    """
    # Stream the webhook is expected to post to; subclasses must set this
    # before using check_webhook.
    STREAM_NAME: Optional[str] = None
    TEST_USER_EMAIL = 'webhook-bot@zulip.com'
    URL_TEMPLATE: str
    FIXTURE_DIR_NAME: Optional[str] = None

    @property
    def test_user(self) -> UserProfile:
        # The bot account that "receives" webhook payloads in these tests.
        return get_user(self.TEST_USER_EMAIL, get_realm("zulip"))

    def setUp(self) -> None:
        super().setUp()
        self.url = self.build_webhook_url()

    def api_stream_message(self, user: UserProfile, *args: Any, **kwargs: Any) -> HttpResponse:
        """Run check_webhook with HTTP basic auth credentials for `user`."""
        kwargs['HTTP_AUTHORIZATION'] = self.encode_user(user)
        return self.check_webhook(*args, **kwargs)

    def check_webhook(
        self,
        fixture_name: str,
        expected_topic: str,
        expected_message: str,
        content_type: Optional[str]="application/json",
        **kwargs: Any,
    ) -> None:
        """
        check_webhook is the main way to test "normal" webhooks that
        work by receiving a payload from a third party and then writing
        some message to a Zulip stream.

        We use `fixture_name` to find the payload data in of our test
        fixtures.  Then we verify that a message gets sent to a stream:

            self.STREAM_NAME: stream name
            expected_topic: topic
            expected_message: content

        We simulate the delivery of the payload with `content_type`,
        and you can pass other headers via `kwargs`.

        For the rare cases of webhooks actually sending private messages,
        see send_and_test_private_message.
        """
        assert self.STREAM_NAME is not None
        self.subscribe(self.test_user, self.STREAM_NAME)

        payload = self.get_payload(fixture_name)
        if content_type is not None:
            kwargs['content_type'] = content_type
        if self.FIXTURE_DIR_NAME is not None:
            # Integrations can declare per-fixture HTTP headers.
            headers = get_fixture_http_headers(self.FIXTURE_DIR_NAME, fixture_name)
            headers = standardize_headers(headers)
            kwargs.update(headers)

        msg = self.send_webhook_payload(
            self.test_user,
            self.url,
            payload,
            **kwargs,
        )
        self.assert_stream_message(
            message=msg,
            stream_name=self.STREAM_NAME,
            topic_name=expected_topic,
            content=expected_message,
        )

    def assert_stream_message(
        self,
        message: Message,
        stream_name: str,
        topic_name: str,
        content: str,
    ) -> None:
        """Assert a message landed on the given stream/topic with the given content."""
        self.assertEqual(get_display_recipient(message.recipient), stream_name)
        self.assertEqual(message.topic_name(), topic_name)
        self.assertEqual(message.content, content)

    def send_and_test_private_message(
        self,
        fixture_name: str,
        expected_message: str,
        content_type: str = "application/json",
        **kwargs: Any,
    ) -> Message:
        """
        For the rare cases that you are testing a webhook that sends
        private messages, use this function.

        Most webhooks send to streams, and you will want to look at
        check_webhook.
        """
        payload = self.get_payload(fixture_name)
        kwargs['content_type'] = content_type

        if self.FIXTURE_DIR_NAME is not None:
            headers = get_fixture_http_headers(self.FIXTURE_DIR_NAME, fixture_name)
            headers = standardize_headers(headers)
            kwargs.update(headers)
        # The sender profile shouldn't be passed any further in kwargs, so we pop it.
        sender = kwargs.pop('sender', self.test_user)

        msg = self.send_webhook_payload(
            sender,
            self.url,
            payload,
            **kwargs,
        )
        self.assertEqual(msg.content, expected_message)

        return msg

    def build_webhook_url(self, *args: Any, **kwargs: Any) -> str:
        """
        Build the webhook URL from URL_TEMPLATE, substituting the API key
        and stream, and appending any extra query arguments.
        """
        url = self.URL_TEMPLATE
        if url.find("api_key") >= 0:
            api_key = get_api_key(self.test_user)
            url = self.URL_TEMPLATE.format(api_key=api_key,
                                           stream=self.STREAM_NAME)
        else:
            url = self.URL_TEMPLATE.format(stream=self.STREAM_NAME)

        has_arguments = kwargs or args
        if not has_arguments:
            # Bug fix: previously a stray '&' was appended to the URL (and
            # returned) even when there were no extra arguments to add.
            return url

        # Start the query string with '?' unless the template already has one.
        url = f"{url}?" if url.find('?') == -1 else f"{url}&"
        for key, value in kwargs.items():
            url = f"{url}{key}={value}&"
        for arg in args:
            url = f"{url}{arg}&"
        # Drop the trailing '&' left by the loops above.
        return url[:-1]

    def get_payload(self, fixture_name: str) -> Union[str, Dict[str, str]]:
        """
        Generally webhooks that override this should return dicts."""
        return self.get_body(fixture_name)

    def get_body(self, fixture_name: str) -> str:
        """Read the raw fixture body, asserting it is valid JSON."""
        assert self.FIXTURE_DIR_NAME is not None
        body = self.webhook_fixture_data(self.FIXTURE_DIR_NAME, fixture_name)
        # fail fast if we don't have valid json
        orjson.loads(body)
        return body
class MigrationsTestCase(ZulipTestCase):  # nocoverage
    """
    Test class for database migrations inspired by this blog post:
       https://www.caktusgroup.com/blog/2016/02/02/writing-unit-tests-django-migrations/
    Documented at https://zulip.readthedocs.io/en/latest/subsystems/schema-migrations.html
    """
    @property
    def app(self) -> str:
        # Django app label containing the test's module (normally "zerver").
        return apps.get_containing_app_config(type(self).__module__).name

    # Subclasses must set both: the migration to roll back to, and the
    # migration under test.
    migrate_from: Optional[str] = None
    migrate_to: Optional[str] = None

    def setUp(self) -> None:
        assert self.migrate_from and self.migrate_to, \
            f"TestCase '{type(self).__name__}' must define migrate_from and migrate_to properties"
        migrate_from: List[Tuple[str, str]] = [(self.app, self.migrate_from)]
        migrate_to: List[Tuple[str, str]] = [(self.app, self.migrate_to)]
        executor = MigrationExecutor(connection)
        # Historical model state as of migrate_from, for setUpBeforeMigration.
        old_apps = executor.loader.project_state(migrate_from).apps

        # Reverse to the original migration
        executor.migrate(migrate_from)

        self.setUpBeforeMigration(old_apps)

        # Run the migration to test
        executor = MigrationExecutor(connection)
        executor.loader.build_graph()  # reload.
        executor.migrate(migrate_to)

        # Post-migration model state, for use in the test methods.
        self.apps = executor.loader.project_state(migrate_to).apps

    def setUpBeforeMigration(self, apps: StateApps) -> None:
        # Hook for subclasses to create pre-migration data.
        pass  # nocoverage
|
showell/zulip
|
zerver/lib/test_classes.py
|
Python
|
apache-2.0
| 50,855
|
[
"VisIt"
] |
b6afb97df4e6c999a1633d0182538c6201d7604fee453f47df366f7c3ca8a072
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2002 Bruce J. DeGrasse
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2007 Robert Cawley <rjc@cawley.id.au>
# Copyright (C) 2008-2009 James Friedmann <jfriedmannj@gmail.com>
# Copyright (C) 2009 Benny Malengier <benny.malengier@gramps-project.org>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Vlada Perić <vlada.peric@gmail.com>
# Copyright (C) 2011 Matt Keenan <matt.keenan@gmail.com>
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013-2014 Paul Franklin
# Copyright (C) 2017 Jon Schewe <jpschewe@mtu.net> - modifications to add all images
#
# This report is a modification of the detailed descendant report. It has
# been modified to include as many images as possible. This is useful for
# creating a report showing all pictures of people as well as sources and
# citations. There is also an option to display TODO notes as a separate
# list for each individual.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Detailed Descendant Report"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
from functools import partial
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import ReportError
from gramps.gen.lib import FamilyRelType, Person, NoteType
from gramps.gen.plug.menu import (BooleanOption, NumberOption, PersonOption,
EnumeratedListOption)
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
TableStyle, TableCellStyle,
FONT_SANS_SERIF, FONT_SERIF,
INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
from gramps.gen.plug.report import Report, Bibliography
from gramps.gen.plug.report import endnotes
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.plugins.lib.libnarrate import Narrator
from gramps.gen.display.place import displayer as place_displayer
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# Placeholder text substituted for missing dates/places when the
# "replace missing ..." report options are enabled.
EMPTY_ENTRY = "_____________"
# Symbol sequence for Henry-system descendant numbers: position N holds
# the symbol appended for child number N+1.
HENRY = "123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class DetailedDescendantReportI(Report):
def __init__(self, database, options, user):
    """
    Create the DetDescendantReport object that produces the report.
    The arguments are:
    database        - the GRAMPS database instance
    options         - instance of the Options class for this report
    user            - a gen.user.User() instance
    This report needs the following parameters (class variables)
    that come in the options class.
    gen       - Maximum number of generations to include.
    pagebbg   - Whether to include page breaks between generations.
    pageben   - Whether to include page break before End Notes.
    fulldates - Whether to use full dates instead of just year.
    listc     - Whether to list children.
    incnotes  - Whether to include notes.
    inctodo   - Whether to include todo notes.
    usecall   - Whether to use the call name as the first name.
    repplace  - Whether to replace missing Places with ___________.
    repdate   - Whether to replace missing Dates with ___________.
    computeage - Whether to compute age.
    omitda    - Whether to omit duplicate ancestors
                (e.g. when distant cousins marry).
    verbose   - Whether to use complete sentences.
    numbering - The descendancy numbering system to be utilized.
    desref    - Whether to add descendant references in child list.
    incphotos - Whether to include images.
    incnames  - Whether to include other names.
    incevents - Whether to include events.
    incaddresses - Whether to include addresses.
    incsrcnotes  - Whether to include source notes in the Endnotes
                   section. Only works if Include sources is selected.
    incmates  - Whether to include information about spouses
    incattrs  - Whether to include attributes
    incpaths  - Whether to include the path of descendancy
                from the start-person to each descendant.
    incssign  - Whether to include a sign ('+') before the
                descendant number in the child-list
                to indicate a child has succession.
    pid       - The Gramps ID of the center person for the report.
    name_format - Preferred format to display names
    incmateref  - Whether to print mate information or reference
    incl_private - Whether to include private data
    """
    Report.__init__(self, database, options, user)
    # index (int) -> person handle, in report traversal order
    self.map = {}
    self._user = user
    menu = options.menu
    get_option_by_name = menu.get_option_by_name
    get_value = lambda name: get_option_by_name(name).get_value()
    stdoptions.run_private_data_option(self, menu)
    self.db = self.database
    self.max_generations = get_value('gen')
    self.pgbrk = get_value('pagebbg')
    self.pgbrkenotes = get_value('pageben')
    self.fulldate = get_value('fulldates')
    use_fulldate = self.fulldate
    self.listchildren = get_value('listc')
    self.inc_notes = get_value('incnotes')
    self.inc_todo = get_value('inctodo')
    use_call = get_value('usecall')
    blankplace = get_value('repplace')
    blankdate = get_value('repdate')
    self.calcageflag = get_value('computeage')
    self.dubperson = get_value('omitda')
    self.verbose = get_value('verbose')
    self.numbering = get_value('numbering')
    self.childref = get_value('desref')
    self.addimages = get_value('incphotos')
    self.inc_names = get_value('incnames')
    self.inc_events = get_value('incevents')
    self.inc_addr = get_value('incaddresses')
    self.inc_sources = get_value('incsources')
    self.inc_srcnotes = get_value('incsrcnotes')
    self.inc_mates = get_value('incmates')
    self.inc_attrs = get_value('incattrs')
    self.inc_paths = get_value('incpaths')
    self.inc_ssign = get_value('incssign')
    self.inc_materef = get_value('incmateref')
    pid = get_value('pid')
    self.center_person = self.db.get_person_from_gramps_id(pid)
    if (self.center_person == None) :
        raise ReportError(_("Person %s is not in the Database") % pid )
    # handle -> report number mappings, per generation bookkeeping
    self.gen_handles = {}
    self.prev_gen_handles = {}
    self.gen_keys = []
    self.dnumber = {}
    # handles of mates already written, to avoid duplication
    self.dmates = {}
    if blankdate:
        empty_date = EMPTY_ENTRY
    else:
        empty_date = ""
    if blankplace:
        empty_place = EMPTY_ENTRY
    else:
        empty_place = ""
    self._locale = self.set_locale(get_value('trans'))
    stdoptions.run_name_format_option(self, menu)
    self.__narrator = Narrator(self.db, self.verbose,
                               use_call, use_fulldate,
                               empty_date, empty_place,
                               nlocale=self._locale,
                               get_endnote_numbers=self.endnotes)
    self.bibli = Bibliography(Bibliography.MODE_DATE|Bibliography.MODE_PAGE)
def apply_henry_filter(self,person_handle, index, pid, cur_gen=1):
    """Recursively assign Henry-system numbers (pid + HENRY symbol per
    child) to the descendants of person_handle, filling self.dnumber,
    self.map and self.gen_keys."""
    if (not person_handle) or (cur_gen > self.max_generations):
        return
    self.dnumber[person_handle] = pid
    self.map[index] = person_handle
    # Bucket this index under its generation.
    if len(self.gen_keys) < cur_gen:
        self.gen_keys.append([index])
    else:
        self.gen_keys[cur_gen-1].append(index)
    person = self.db.get_person_from_handle(person_handle)
    index = 0
    for family_handle in person.get_family_handle_list():
        family = self.db.get_family_from_handle(family_handle)
        for child_ref in family.get_child_ref_list():
            # Next free slot in self.map; recursion may have grown it.
            ix = max(self.map)
            self.apply_henry_filter(child_ref.ref, ix+1,
                              pid+HENRY[index], cur_gen+1)
            index += 1
# Filter for d'Aboville numbering
def apply_daboville_filter(self,person_handle, index, pid, cur_gen=1):
    """Recursively assign d'Aboville numbers (e.g. 1.2.3) to the
    descendants of person_handle, filling self.dnumber, self.map
    and self.gen_keys."""
    if not person_handle or cur_gen > self.max_generations:
        return
    self.dnumber[person_handle] = pid
    self.map[index] = person_handle
    # Bucket this index under its generation.
    if len(self.gen_keys) < cur_gen:
        self.gen_keys.append([index])
    else:
        self.gen_keys[cur_gen - 1].append(index)
    person = self.db.get_person_from_handle(person_handle)
    child_pos = 1  # child ordinal, continuous across all families
    for family_handle in person.get_family_handle_list():
        family = self.db.get_family_from_handle(family_handle)
        for child_ref in family.get_child_ref_list():
            # Next free slot in self.map; recursion may have grown it.
            next_index = max(self.map) + 1
            self.apply_daboville_filter(child_ref.ref, next_index,
                                        "%s.%d" % (pid, child_pos),
                                        cur_gen + 1)
            child_pos += 1
# Filter for Record-style (Modified Register) numbering
def apply_mod_reg_filter_aux(self, person_handle, index, cur_gen=1):
    """Recursively record the descendants of person_handle into
    self.map and self.gen_keys; numbers are assigned afterwards by
    apply_mod_reg_filter."""
    if (not person_handle) or (cur_gen > self.max_generations):
        return
    self.map[index] = person_handle
    # Bucket this index under its generation.
    if len(self.gen_keys) < cur_gen:
        self.gen_keys.append([index])
    else:
        self.gen_keys[cur_gen-1].append(index)
    person = self.db.get_person_from_handle(person_handle)
    for family_handle in person.get_family_handle_list():
        family = self.db.get_family_from_handle(family_handle)
        for child_ref in family.get_child_ref_list():
            # Next free slot in self.map; recursion may have grown it.
            ix = max(self.map)
            self.apply_mod_reg_filter_aux(child_ref.ref, ix+1, cur_gen+1)
def apply_mod_reg_filter(self, person_handle):
    """Walk the tree, then hand out sequential Record-style numbers
    in generation order (each person numbered once)."""
    self.apply_mod_reg_filter_aux(person_handle, 1, 1)
    next_number = 1
    for gen_index in range(len(self.gen_keys)):
        for key in self.gen_keys[gen_index]:
            handle = self.map[key]
            if handle not in self.dnumber:
                self.dnumber[handle] = next_number
                next_number += 1
def write_report(self):
    """
    This function is called by the report system and writes the report.
    """
    # First pass: number every descendant with the chosen system.
    if self.numbering == "Henry":
        self.apply_henry_filter(self.center_person.get_handle(), 1, "1")
    elif self.numbering == "d'Aboville":
        self.apply_daboville_filter(self.center_person.get_handle(), 1, "1")
    elif self.numbering == "Record (Modified Register)":
        self.apply_mod_reg_filter(self.center_person.get_handle())
    else:
        raise AttributeError("no such numbering: '%s'" % self.numbering)
    name = self._name_display.display_name(
        self.center_person.get_primary_name())
    if not name:
        name = self._("Unknown")
    self.doc.start_paragraph("DDRI-Title")
    # feature request 2356: avoid genitive form
    title = self._("Descendant Report for %(person_name)s") % {
        'person_name' : name }
    mark = IndexMark(title, INDEX_TYPE_TOC, 1)
    self.doc.write_text(title, mark)
    self.doc.end_paragraph()
    generation = 0
    # Track which numbers were output to skip duplicate entries.
    self.numbers_printed = list()
    for generation in range(len(self.gen_keys)):
        if self.pgbrk and generation > 0:
            self.doc.page_break()
        self.doc.start_paragraph("DDRI-Generation")
        text = self._("Generation %d") % (generation+1)
        mark = IndexMark(text, INDEX_TYPE_TOC, 2)
        self.doc.write_text(text, mark)
        self.doc.end_paragraph()
        if self.childref:
            # Remember the previous generation so child lists can
            # back-reference their parents' entries.
            self.prev_gen_handles = self.gen_handles.copy()
            self.gen_handles.clear()
        for key in self.gen_keys[generation]:
            person_handle = self.map[key]
            self.gen_handles[person_handle] = key
            self.write_person(key)
    if self.inc_sources:
        if self.pgbrkenotes:
            self.doc.page_break()
        # it ignores language set for Note type (use locale)
        #endnotes.write_endnotes(self.bibli, self.db, self.doc,
        #                        printnotes=self.inc_srcnotes,
        #                        elocale=self._locale)
        self.write_endnotes_with_media()
def write_path(self, person):
    """Write the chain of numbered ancestors leading back from
    `person` toward the start person, e.g. "(name-2; name-1) "."""
    path = []
    while True:
        #person changes in the loop
        family_handle = person.get_main_parents_family_handle()
        if family_handle:
            family = self.db.get_family_from_handle(family_handle)
            mother_handle = family.get_mother_handle()
            father_handle = family.get_father_handle()
            # Follow whichever parent is itself a numbered descendant.
            if mother_handle and mother_handle in self.dnumber:
                person = self.db.get_person_from_handle(mother_handle)
                person_name = self._name_display.display_name(
                    person.get_primary_name())
                path.append(person_name)
            elif father_handle and father_handle in self.dnumber:
                person = self.db.get_person_from_handle(father_handle)
                person_name = self._name_display.display_name(
                    person.get_primary_name())
                path.append(person_name)
            else:
                break
        else:
            break
    index = len(path)
    if index:
        self.doc.write_text("(")
    for name in path:
        if index == 1:
            # Last element closes the parenthesis.
            self.doc.write_text(name + "-" + str(index) + ") ")
        else:
            # translators: needed for Arabic, ignore otherwise
            self.doc.write_text(name + "-" + str(index) + self._("; "))
        index -= 1
def write_person(self, key):
    """Output birth, death, parentage, marriage and notes information """
    person_handle = self.map[key]
    person = self.db.get_person_from_handle(person_handle)
    val = self.dnumber[person_handle]
    # Skip people already printed under the same descendant number.
    if val in self.numbers_printed:
        return
    else:
        self.numbers_printed.append(val)
    self.doc.start_paragraph("DDRI-First-Entry","%s." % val)
    name = self._name_display.display(person)
    if not name:
        name = self._("Unknown")
    mark = ReportUtils.get_person_mark(self.db, person)
    self.doc.start_bold()
    self.doc.write_text(name, mark)
    # Avoid a doubled period when the name already ends with one.
    if name[-1:] == '.':
        self.doc.write_text_citation("%s " % self.endnotes(person))
    elif name:
        self.doc.write_text_citation("%s. " % self.endnotes(person))
    self.doc.end_bold()
    if self.inc_paths:
        self.write_path(person)
    if self.dubperson:
        # Check for duplicate record (result of distant cousins marrying)
        for dkey in sorted(self.map):
            if dkey >= key:
                break
            if self.map[key] == self.map[dkey]:
                self.doc.write_text(self._(
                    "%(name)s is the same person as [%(id_str)s].") % {
                        'name' :'',
                        'id_str': self.dnumber[self.map[dkey]],
                    }
                )
                self.doc.end_paragraph()
                return
    self.doc.end_paragraph()
    self.write_person_info(person)
    if (self.inc_mates or self.listchildren or self.inc_notes or
            self.inc_events or self.inc_attrs):
        for family_handle in person.get_family_handle_list():
            family = self.db.get_family_from_handle(family_handle)
            if self.inc_mates:
                self.__write_mate(person, family)
            if self.listchildren:
                self.__write_children(family)
            if self.inc_notes:
                self.__write_family_notes(family)
            # `first` tracks whether the "More about..." header is
            # still pending when attributes follow events.
            first = True
            if self.inc_events:
                first = self.__write_family_events(family)
            if self.inc_attrs:
                self.__write_family_attrs(family, first)
def write_event(self, event_ref):
    """Write one event paragraph: date/place/description with
    citations, plus the event's attributes and notes if enabled."""
    text = ""
    event = self.db.get_event_from_handle(event_ref.ref)
    if self.fulldate:
        date = self._get_date(event.get_date_object())
    else:
        date = event.get_date_object().get_year()
    place = place_displayer.display_event(self.db, event)
    self.doc.start_paragraph('DDRI-MoreDetails')
    event_name = self._get_type(event.get_type())
    if date and place:
        text += self._('%(date)s, %(place)s') % {
            'date' : date, 'place' : place }
    elif date:
        text += self._('%(date)s') % {'date' : date}
    elif place:
        text += self._('%(place)s') % { 'place' : place }
    if event.get_description():
        if text:
            text += ". "
        text += event.get_description()
    text += self.endnotes(event)
    if text:
        text += ". "
    text = self._('%(event_name)s: %(event_text)s') % {
        'event_name' : self._(event_name),
        'event_text' : text }
    self.doc.write_text_citation(text)
    if self.inc_attrs:
        text = ""
        # Attributes of the event itself and of this event reference.
        attr_list = event.get_attribute_list()
        attr_list.extend(event_ref.get_attribute_list())
        for attr in attr_list:
            if text:
                # translators: needed for Arabic, ignore otherwise
                text += self._("; ")
            attrName = self._get_type(attr.get_type())
            text += self._("%(type)s: %(value)s%(endnotes)s") % {
                'type' : self._(attrName),
                'value' : attr.get_value(),
                'endnotes' : self.endnotes(attr) }
        text = " " + text
        self.doc.write_text_citation(text)
    self.doc.end_paragraph()
    if self.inc_notes:
        # if the event or event reference has a note attached to it,
        # get the text and format it correctly
        notelist = event.get_note_list()
        notelist.extend(event_ref.get_note_list())
        self.write_notes(notelist, "DDRI-MoreDetails")
def __write_parents(self, person):
    """Write the "child of ..." sentence naming the person's parents,
    with index marks for each parent that could be resolved."""
    family_handle = person.get_main_parents_family_handle()
    if not family_handle:
        return
    family = self.db.get_family_from_handle(family_handle)

    def resolve(handle):
        # Map a parent handle to (display name, index mark); empty
        # strings when the handle is missing.
        if not handle:
            return "", ""
        parent = self.db.get_person_from_handle(handle)
        display = self._name_display.display_name(
            parent.get_primary_name())
        return display, ReportUtils.get_person_mark(self.db, parent)

    mother_name, mother_mark = resolve(family.get_mother_handle())
    father_name, father_mark = resolve(family.get_father_handle())
    sentence = self.__narrator.get_child_string(father_name, mother_name)
    if sentence:
        self.doc.write_text(sentence)
        if father_mark:
            self.doc.write_text("", father_mark)
        if mother_mark:
            self.doc.write_text("", mother_mark)
def write_marriage(self, person):
    """
    Output marriage sentence.
    """
    first_family = True
    for family_handle in person.get_family_handle_list():
        family = self.db.get_family_from_handle(family_handle)
        spouse_handle = ReportUtils.find_spouse(person, family)
        spouse_mark = None
        if spouse_handle:
            spouse = self.db.get_person_from_handle(spouse_handle)
            spouse_mark = ReportUtils.get_person_mark(self.db, spouse)
        sentence = self.__narrator.get_married_string(family,
                                                     first_family,
                                                     self._name_display)
        if sentence:
            self.doc.write_text_citation(sentence, spouse_mark)
            first_family = False
def __write_mate(self, person, family):
    """
    Write information about the person's spouse/mate.
    """
    # The mate is the family parent of the opposite role.
    if person.get_gender() == Person.MALE:
        mate_handle = family.get_mother_handle()
    else:
        mate_handle = family.get_father_handle()
    if mate_handle:
        mate = self.db.get_person_from_handle(mate_handle)
        self.doc.start_paragraph("DDRI-MoreHeader")
        name = self._name_display.display(mate)
        if not name:
            name = self._("Unknown")
        mark = ReportUtils.get_person_mark(self.db, mate)
        if family.get_relationship() == FamilyRelType.MARRIED:
            self.doc.write_text(self._("Spouse: %s") % name, mark)
        else:
            self.doc.write_text(self._("Relationship with: %s")
                                % name, mark)
        if name[-1:] != '.':
            self.doc.write_text(".")
        self.doc.write_text_citation(self.endnotes(mate))
        self.doc.end_paragraph()
        if not self.inc_materef:
            # Don't want to just print reference
            self.write_person_info(mate)
        else:
            # Check to see if we've married a cousin
            if mate_handle in self.dnumber:
                self.doc.start_paragraph('DDRI-MoreDetails')
                self.doc.write_text_citation(
                    self._("Ref: %(number)s. %(name)s") %
                    {'number': self.dnumber[mate_handle],
                     'name': name})
                self.doc.end_paragraph()
            else:
                # Remember this mate so their own entry can refer back.
                self.dmates[mate_handle] = person.get_handle()
                self.write_person_info(mate)
def __get_mate_names(self, family):
    """Return (mother_name, father_name) display strings for the
    family, substituting "Unknown" for missing parents or names."""
    def display_or_unknown(handle):
        if not handle:
            return self._("Unknown")
        parent = self.db.get_person_from_handle(handle)
        return self._name_display.display(parent) or self._("Unknown")

    return (display_or_unknown(family.get_mother_handle()),
            display_or_unknown(family.get_father_handle()))
def __write_children(self, family):
    """
    List the children for the given family.
    """
    if not family.get_child_ref_list():
        return
    mother_name, father_name = self.__get_mate_names(family)
    self.doc.start_paragraph("DDRI-ChildTitle")
    self.doc.write_text(
        self._("Children of %(mother_name)s and %(father_name)s") %
        {'father_name': father_name,
         'mother_name': mother_name } )
    self.doc.end_paragraph()
    cnt = 1
    for child_ref in family.get_child_ref_list():
        child_handle = child_ref.ref
        child = self.db.get_person_from_handle(child_handle)
        child_name = self._name_display.display(child)
        if not child_name:
            child_name = self._("Unknown")
        child_mark = ReportUtils.get_person_mark(self.db, child)
        # Back-reference the child's entry in the previous generation.
        if self.childref and self.prev_gen_handles.get(child_handle):
            value = str(self.prev_gen_handles.get(child_handle))
            child_name += " [%s]" % value
        if self.inc_ssign:
            # '+' marks a child who has descendants of their own.
            prefix = " "
            for family_handle in child.get_family_handle_list():
                family = self.db.get_family_from_handle(family_handle)
                if family.get_child_ref_list():
                    prefix = "+ "
                    break
        else:
            prefix = ""
        if child_handle in self.dnumber:
            self.doc.start_paragraph("DDRI-ChildList",
                                     prefix
                                     + str(self.dnumber[child_handle])
                                     + " "
                                     + ReportUtils.roman(cnt).lower()
                                     + ".")
        else:
            self.doc.start_paragraph("DDRI-ChildList",
                                     prefix + ReportUtils.roman(cnt).lower() + ".")
        cnt += 1
        self.doc.write_text("%s. " % child_name, child_mark)
        self.__narrator.set_subject(child)
        # Prefer birth over christening over baptism, death over burial.
        self.doc.write_text_citation(
            self.__narrator.get_born_string() or
            self.__narrator.get_christened_string() or
            self.__narrator.get_baptised_string())
        self.doc.write_text_citation(
            self.__narrator.get_died_string() or
            self.__narrator.get_buried_string())
        self.doc.end_paragraph()
def __write_family_notes(self, family):
    """
    Write the notes for the given family, preceded by a header
    naming both partners.
    """
    notelist = family.get_note_list()
    if not notelist:
        return
    mother_name, father_name = self.__get_mate_names(family)
    self.doc.start_paragraph("DDRI-NoteHeader")
    self.doc.write_text(
        self._('Notes for %(mother_name)s and %(father_name)s:') % {
            'mother_name' : mother_name,
            'father_name' : father_name })
    self.doc.end_paragraph()
    self.write_notes(notelist, "DDRI-Entry")
def __write_family_events(self, family):
    """
    List the events for the given family.

    Returns the final `first` flag: True when no "More about ..."
    header was written (so a following section must still emit one),
    False once the header has been written.
    """
    first = True
    if not family.get_event_ref_list():
        # Bug fix: this used to `return` (i.e. None), making the
        # caller's `first` flag falsy and suppressing the header in
        # __write_family_attrs even though nothing had been written.
        return first
    mother_name, father_name = self.__get_mate_names(family)
    for event_ref in family.get_event_ref_list():
        if first:
            self.doc.start_paragraph('DDRI-MoreHeader')
            self.doc.write_text(
                self._('More about %(mother_name)s and %(father_name)s:')
                % {'mother_name' : mother_name,
                   'father_name' : father_name })
            self.doc.end_paragraph()
            first = False
        self.write_event(event_ref)
    return first
def __write_family_attrs(self, family, first):
    """
    List the attributes for the given family.

    `first` is True when the "More about ..." header has not been
    written yet by __write_family_events.
    """
    attrs = family.get_attribute_list()
    if first and attrs:
        mother_name, father_name = self.__get_mate_names(family)
        self.doc.start_paragraph('DDRI-MoreHeader')
        self.doc.write_text(
            self._('More about %(mother_name)s and %(father_name)s:')
            % {'mother_name' : mother_name,
               'father_name' : father_name })
        self.doc.end_paragraph()
    for attr in attrs:
        self.doc.start_paragraph('DDRI-MoreDetails')
        attrName = self._get_type(attr.get_type())
        text = self._("%(type)s: %(value)s%(endnotes)s") % {
            'type' : self._(attrName),
            'value' : attr.get_value(),
            'endnotes' : self.endnotes(attr) }
        self.doc.write_text_citation( text )
        self.doc.end_paragraph()
        if self.inc_notes:
            # if the attr or attr reference has a note attached to it,
            # get the text and format it correctly
            notelist = attr.get_note_list()
            self.write_notes(notelist, "DDRI-MoreDetails")
def write_person_info(self, person):
    """Write the narrative block for one person: vital events,
    parents, marriages, then optional notes, alternate names,
    events, addresses and attributes."""
    name = self._name_display.display(person)
    if not name:
        name = self._("Unknown")
    self.__narrator.set_subject(person)
    if self.addimages:
        self.write_images(person.get_media_list())
    self.doc.start_paragraph("DDRI-Entry")
    # In terse mode parents come before the vital events.
    if not self.verbose:
        self.__write_parents(person)
    text = self.__narrator.get_born_string()
    if text:
        self.doc.write_text_citation(text)
    text = self.__narrator.get_baptised_string()
    if text:
        self.doc.write_text_citation(text)
    text = self.__narrator.get_christened_string()
    if text:
        self.doc.write_text_citation(text)
    text = self.__narrator.get_died_string(self.calcageflag)
    if text:
        self.doc.write_text_citation(text)
    text = self.__narrator.get_buried_string()
    if text:
        self.doc.write_text_citation(text)
    if self.verbose:
        self.__write_parents(person)
    self.write_marriage(person)
    self.doc.end_paragraph()
    notelist = person.get_note_list()
    if len(notelist) > 0 and self.inc_notes:
        self.doc.start_paragraph("DDRI-NoteHeader")
        # feature request 2356: avoid genitive form
        self.doc.write_text(self._("Notes for %s") % name)
        self.doc.end_paragraph()
        self.write_notes(notelist, "DDRI-Entry")
    # `first` gates a single "More about ..." header shared by the
    # names/events/addresses/attributes sections below.
    first = True
    if self.inc_names:
        for alt_name in person.get_alternate_names():
            if first:
                self.doc.start_paragraph('DDRI-MoreHeader')
                self.doc.write_text(self._('More about %(person_name)s:')
                                    % {'person_name' : name })
                self.doc.end_paragraph()
                first = False
            self.doc.start_paragraph('DDRI-MoreDetails')
            atype = self._get_type(alt_name.get_type())
            aname = alt_name.get_regular_name()
            self.doc.write_text_citation(
                self._('%(name_kind)s: %(name)s%(endnotes)s')
                % {'name_kind' : self._(atype),
                   'name' : aname,
                   'endnotes' : self.endnotes(alt_name),
                   })
            self.doc.end_paragraph()
    if self.inc_events:
        for event_ref in person.get_primary_event_ref_list():
            if first:
                self.doc.start_paragraph('DDRI-MoreHeader')
                self.doc.write_text(self._('More about %(person_name)s:')
                                    % {'person_name' : name })
                self.doc.end_paragraph()
                # Consistency fix: was `first = 0`; use False like the
                # other sections (same truthiness, clearer intent).
                first = False
            self.write_event(event_ref)
    if self.inc_addr:
        for addr in person.get_address_list():
            if first:
                self.doc.start_paragraph('DDRI-MoreHeader')
                self.doc.write_text(self._('More about %(person_name)s:')
                                    % {'person_name' : name })
                self.doc.end_paragraph()
                first = False
            self.doc.start_paragraph('DDRI-MoreDetails')
            text = ReportUtils.get_address_str(addr)
            if self.fulldate:
                date = self._get_date(addr.get_date_object())
            else:
                date = addr.get_date_object().get_year()
            self.doc.write_text(self._('Address: '))
            if date:
                # translators: needed for Arabic, ignore otherwise
                self.doc.write_text(self._('%s, ') % date )
            self.doc.write_text( text )
            self.doc.write_text_citation( self.endnotes(addr) )
            self.doc.end_paragraph()
    if self.inc_attrs:
        attrs = person.get_attribute_list()
        if first and attrs:
            self.doc.start_paragraph('DDRI-MoreHeader')
            self.doc.write_text(self._('More about %(person_name)s:') % {
                'person_name' : name })
            self.doc.end_paragraph()
            first = False
        for attr in attrs:
            self.doc.start_paragraph('DDRI-MoreDetails')
            attrName = self._get_type(attr.get_type())
            text = self._("%(type)s: %(value)s%(endnotes)s") % {
                'type' : self._(attrName),
                'value' : attr.get_value(),
                'endnotes' : self.endnotes(attr) }
            self.doc.write_text_citation( text )
            self.doc.end_paragraph()
def endnotes(self, obj):
    """Return superscript citation markup for obj, or '' when sources
    are disabled or there is nothing to cite."""
    if not obj or not self.inc_sources:
        return ""
    citation = endnotes.cite_source(self.bibli, self.db, obj, self._locale)
    if citation:
        return '<super>' + citation + '</super>'
    return citation
def write_notes(self, notelist, style_name):
    """
    Write out the notes section; TODO notes are grouped under their
    own header after the regular notes, when enabled.
    """
    regular_notes = []
    todo_notes = []
    # Partition the handles into regular and TODO notes.
    for notehandle in notelist:
        note = self.database.get_note_from_handle(notehandle)
        if note.get_type() == NoteType.TODO:
            todo_notes.append(note)
        else:
            regular_notes.append(note)
    # Regular notes come first.
    for note in regular_notes:
        self.doc.write_styled_note(note.get_styledtext(),
                                   note.get_format(), style_name)
    # TODO notes go under a separate header.
    if self.inc_todo and todo_notes:
        self.doc.start_paragraph("DDRI-NoteHeader")
        self.doc.write_text("TODO Notes:")
        self.doc.end_paragraph()
        for note in todo_notes:
            self.doc.write_styled_note(note.get_styledtext(),
                                       note.get_format(), style_name)
def write_paragraph(self, text,
                    endnotes=None, mark=None, style='DDRI-NoteHeader'):
    """Write a single paragraph of `text` in `style`, optionally
    followed by superscript endnote numbers and an index mark."""
    self.doc.start_paragraph(style)
    self.doc.write_text(text, mark)
    if endnotes:
        self.doc.start_superscript()
        self.doc.write_text(endnotes)
        self.doc.end_superscript()
    self.doc.end_paragraph()
def _cite_endnote(self, obj, prior=''):
    """Cite obj and combine the citation number(s) with any prior
    citation text; returns the combined string."""
    if not self.inc_sources:
        return ""
    if not obj:
        return prior
    citation = endnotes.cite_source(self.bibli, self.db, obj, self._locale)
    if not citation:
        return prior
    if not prior:
        return citation
    # translators: needed for Arabic, ignore otherwise
    return self._('%(str1)s, %(str2)s') % {'str1': prior, 'str2': citation}
def do_attributes(self, attr_list):
    """Write one paragraph per attribute with its citation endnotes."""
    for attribute in attr_list:
        base_type = attribute.get_type().type2base()
        # translators: needed for French, ignore otherwise
        line = self._("%(type)s: %(value)s") % {
            'type' : self._(base_type),
            'value' : attribute.get_value() }
        self.write_paragraph(line, self._cite_endnote(attribute))
def write_images(self, media_list):
    """
    Write out all images in media_list as a table.
    This is based on the code from the individual report.
    @param media_list result of get_media_list from an object
    """
    # First pass: count image-typed media to decide if a table is needed.
    i_total = 0
    for media_ref in media_list:
        media_handle = media_ref.get_reference_handle()
        if media_handle:
            media = self.database.get_media_from_handle(media_handle)
            if media and media.get_mime_type():
                if media.get_mime_type().startswith("image"):
                    i_total += 1
    if i_total == 0:
        return
    self.doc.start_table("images","DDRI-GalleryTable")
    cells = 3 # the GalleryTable has 3 cells
    self.doc.start_row()
    self.doc.start_cell("DDRI-TableHead", cells)
    self.write_paragraph(self._('Images'), style='DDRI-TableTitle')
    self.doc.end_cell()
    self.doc.end_row()
    media_count = 0
    image_count = 0
    # Second pass: lay images out three per row.
    while ( media_count < len(media_list) ):
        media_ref = media_list[media_count]
        media_handle = media_ref.get_reference_handle()
        media = self.database.get_media_from_handle(media_handle)
        if media is None:
            from gramps.gui.dialog import RunDatabaseRepair
            RunDatabaseRepair(_('Non existing media found in the Gallery'))
            return
        mime_type = media.get_mime_type()
        if not mime_type or not mime_type.startswith("image"):
            media_count += 1
            continue
        description = media.get_description()
        if image_count % cells == 0:
            self.doc.start_row()
        self.doc.start_cell('DDRI-NormalCell')
        self.write_paragraph(description, style='DDRI-ImageCaptionCenter')
        ReportUtils.insert_image(self.database, self.doc, media_ref, self._user,
                                 align='center', w_cm=5.0, h_cm=5.0)
        self.do_attributes(media.get_attribute_list() +
                           media_ref.get_attribute_list() )
        self.doc.end_cell()
        if image_count % cells == cells - 1:
            self.doc.end_row()
        media_count += 1
        image_count += 1
    # Close a partially-filled final row.
    if image_count % cells != 0:
        self.doc.end_row()
    self.doc.end_table()
    self.doc.start_paragraph('DDRI-NoteHeader')
    self.doc.end_paragraph()
def write_endnotes_with_media(self):
    """
    Write all the entries in the bibliography as endnotes with the media.
    This is copied from endnotes.py and modified to include media.
    If elocale is passed in (a :class:`.GrampsLocale`), then (insofar as
    possible) the translated values will be returned instead.
    :param bibliography: The bibliography that contains the citations.
    :type bibliography: :class:`.Bibliography`
    :param database: The database that the sources come from.
    :type database: DbBase
    :param doc: The document to write the endnotes into.
    :type doc: :class:`~.docgen.TextDoc`
    :param printnotes: Indicate if the notes attached to a source must be
            written too.
    :type printnotes: bool
    :param links: Indicate if URL links should be makde 'clickable'.
    :type links: bool
    :param elocale: allow deferred translation of dates and strings
    :type elocale: a :class:`.GrampsLocale` instance
    """
    # Local aliases mirroring the parameters of endnotes.write_endnotes.
    bibliography = self.bibli
    database = self.database
    doc = self.doc
    printnotes= self.inc_srcnotes
    links=False
    elocale=self._locale
    if bibliography.get_citation_count() == 0:
        return
    trans_text = elocale.translation.gettext
    # trans_text is a defined keyword (see po/update_po.py, po/genpot.sh)
    doc.start_paragraph('Endnotes-Header')
    doc.write_text(trans_text('Endnotes'))
    doc.end_paragraph()
    cindex = 0
    for citation in bibliography.get_citation_list():
        cindex += 1
        source = database.get_source_from_handle(citation.get_source_handle())
        first = True
        doc.start_paragraph('Endnotes-Source', "%d." % cindex)
        doc.write_text(endnotes._format_source_text(source, elocale), links=links)
        doc.end_paragraph()
        if printnotes:
            endnotes._print_notes(source, database, doc,
                                  'Endnotes-Source-Notes', links)
        # Unlike stock endnotes, also emit the source's media.
        citation_plist = source.get_media_list()
        if self.addimages:
            self.write_images(citation_plist)
        for key, ref in citation.get_ref_list():
            # translators: needed for French, ignore otherwise
            doc.start_paragraph('Endnotes-Ref', trans_text('%s:') % key)
            doc.write_text(endnotes._format_ref_text(ref, key, elocale), links=links)
            doc.end_paragraph()
            if printnotes:
                endnotes._print_notes(ref, database, doc,
                                      'Endnotes-Ref-Notes', links)
            # ... and the media attached to each citation reference.
            ref_plist = ref.get_media_list()
            if self.addimages:
                self.write_images(ref_plist)
#------------------------------------------------------------------------
#
# DetDescendantOptions
#
#------------------------------------------------------------------------
class DetailedDescendantIOptions(MenuReportOptions):
    """
    Defines options and provides handling interface for the detailed
    descendant (with images) report.
    """

    def __init__(self, name, dbase):
        MenuReportOptions.__init__(self, name, dbase)

    def add_menu_options(self, menu):
        """
        Add options to the menu for the detailed descendant report.

        :param menu: the menu the options are added to
        """
        # Report Options
        category = _("Report Options")
        add_option = partial(menu.add_option, category)

        pid = PersonOption(_("Center Person"))
        pid.set_help(_("The center person for the report"))
        add_option("pid", pid)

        stdoptions.add_name_format_option(menu, category)
        stdoptions.add_private_data_option(menu, category)

        numbering = EnumeratedListOption(_("Numbering system"), "Henry")
        numbering.set_items([
            ("Henry", _("Henry numbering")),
            ("d'Aboville", _("d'Aboville numbering")),
            ("Record (Modified Register)",
             _("Record (Modified Register) numbering"))])
        numbering.set_help(_("The numbering system to be used"))
        add_option("numbering", numbering)

        generations = NumberOption(_("Generations"), 10, 1, 100)
        generations.set_help(
            _("The number of generations to include in the report")
        )
        add_option("gen", generations)

        pagebbg = BooleanOption(_("Page break between generations"), False)
        pagebbg.set_help(
            _("Whether to start a new page after each generation."))
        add_option("pagebbg", pagebbg)

        pageben = BooleanOption(_("Page break before end notes"), False)
        pageben.set_help(
            _("Whether to start a new page before the end notes."))
        add_option("pageben", pageben)

        stdoptions.add_localization_option(menu, category)

        # Content
        add_option = partial(menu.add_option, _("Content"))

        usecall = BooleanOption(_("Use callname for common name"), False)
        usecall.set_help(_("Whether to use the call name as the first name."))
        add_option("usecall", usecall)

        fulldates = BooleanOption(_("Use full dates instead of only the year"),
                                  True)
        fulldates.set_help(_("Whether to use full dates instead of just year."))
        add_option("fulldates", fulldates)

        listc = BooleanOption(_("List children"), True)
        listc.set_help(_("Whether to list children."))
        add_option("listc", listc)

        computeage = BooleanOption(_("Compute death age"), True)
        computeage.set_help(_("Whether to compute a person's age at death."))
        add_option("computeage", computeage)

        omitda = BooleanOption(_("Omit duplicate ancestors"), True)
        omitda.set_help(_("Whether to omit duplicate ancestors."))
        add_option("omitda", omitda)

        verbose = BooleanOption(_("Use complete sentences"), True)
        verbose.set_help(
            _("Whether to use complete sentences or succinct language."))
        add_option("verbose", verbose)

        desref = BooleanOption(_("Add descendant reference in child list"),
                               True)
        desref.set_help(
            _("Whether to add descendant references in child list."))
        add_option("desref", desref)

        # Include
        category_name = _("Include")
        add_option = partial(menu.add_option, category_name)

        incnotes = BooleanOption(_("Include notes"), True)
        incnotes.set_help(_("Whether to include notes."))
        add_option("incnotes", incnotes)

        inctodo = BooleanOption(_("Include TODO notes"), True)
        inctodo.set_help(_("Whether to include TODO notes."))
        add_option("inctodo", inctodo)

        incattrs = BooleanOption(_("Include attributes"), False)
        incattrs.set_help(_("Whether to include attributes."))
        add_option("incattrs", incattrs)

        incphotos = BooleanOption(_("Include Photo/Images from Gallery"), True)
        incphotos.set_help(_("Whether to include images."))
        add_option("incphotos", incphotos)

        incnames = BooleanOption(_("Include alternative names"), False)
        incnames.set_help(_("Whether to include other names."))
        add_option("incnames", incnames)

        incevents = BooleanOption(_("Include events"), False)
        incevents.set_help(_("Whether to include events."))
        add_option("incevents", incevents)

        incaddresses = BooleanOption(_("Include addresses"), False)
        incaddresses.set_help(_("Whether to include addresses."))
        add_option("incaddresses", incaddresses)

        incsources = BooleanOption(_("Include sources"), False)
        incsources.set_help(_("Whether to include source references."))
        add_option("incsources", incsources)

        incsrcnotes = BooleanOption(_("Include sources notes"), False)
        incsrcnotes.set_help(_("Whether to include source notes in the "
            "Endnotes section. Only works if Include sources is selected."))
        add_option("incsrcnotes", incsrcnotes)

        incmates = BooleanOption(_("Include spouses"), False)
        incmates.set_help(_("Whether to include detailed spouse information."))
        add_option("incmates", incmates)

        incmateref = BooleanOption(_("Include spouse reference"), False)
        incmateref.set_help(_("Whether to include reference to spouse."))
        add_option("incmateref", incmateref)

        incssign = BooleanOption(_("Include sign of succession ('+')"
                                   " in child-list"), True)
        incssign.set_help(_("Whether to include a sign ('+') before the"
                            " descendant number in the child-list to indicate"
                            " a child has succession."))
        add_option("incssign", incssign)

        incpaths = BooleanOption(_("Include path to start-person"), False)
        incpaths.set_help(_("Whether to include the path of descendancy "
                            "from the start-person to each descendant."))
        add_option("incpaths", incpaths)

        # Missing information
        add_option = partial(menu.add_option, _("Missing information"))

        repplace = BooleanOption(_("Replace missing places with ______"), False)
        repplace.set_help(_("Whether to replace missing Places with blanks."))
        add_option("repplace", repplace)

        repdate = BooleanOption(_("Replace missing dates with ______"), False)
        repdate.set_help(_("Whether to replace missing Dates with blanks."))
        add_option("repdate", repdate)

    def make_default_style(self, default_style):
        """Make the default output style for the Detailed Descendant Report."""
        font = FontStyle()
        font.set(face=FONT_SANS_SERIF, size=16, bold=1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_header_level(1)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_alignment(PARA_ALIGN_CENTER)
        para.set_description(_('The style used for the title of the page.'))
        default_style.add_paragraph_style("DDRI-Title", para)

        font = FontStyle()
        font.set(face=FONT_SANS_SERIF, size=14, italic=1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_header_level(2)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_description(_('The style used for the generation header.'))
        default_style.add_paragraph_style("DDRI-Generation", para)

        font = FontStyle()
        font.set(face=FONT_SANS_SERIF, size=10, italic=0, bold=1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_left_margin(1.5)  # in centimeters
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_description(_('The style used for the children list title.'))
        default_style.add_paragraph_style("DDRI-ChildTitle", para)

        font = FontStyle()
        font.set(size=10)
        para = ParagraphStyle()
        para.set_font(font)
        para.set(first_indent=-0.75, lmargin=2.25)
        para.set_top_margin(0.125)
        para.set_bottom_margin(0.125)
        para.set_description(_('The style used for the children list.'))
        default_style.add_paragraph_style("DDRI-ChildList", para)

        font = FontStyle()
        font.set(face=FONT_SANS_SERIF, size=10, italic=0, bold=1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set(first_indent=0.0, lmargin=1.5)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        default_style.add_paragraph_style("DDRI-NoteHeader", para)

        para = ParagraphStyle()
        para.set(lmargin=1.5)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_description(_('The basic style used for the text display.'))
        default_style.add_paragraph_style("DDRI-Entry", para)

        para = ParagraphStyle()
        para.set(first_indent=-1.5, lmargin=1.5)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_description(_('The style used for the first personal entry.'))
        default_style.add_paragraph_style("DDRI-First-Entry", para)

        font = FontStyle()
        font.set(size=10, face=FONT_SANS_SERIF, bold=1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set(first_indent=0.0, lmargin=1.5)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_description(_('The style used for the More About header and '
                               'for headers of mates.'))
        default_style.add_paragraph_style("DDRI-MoreHeader", para)

        font = FontStyle()
        font.set(face=FONT_SERIF, size=10)
        para = ParagraphStyle()
        para.set_font(font)
        para.set(first_indent=0.0, lmargin=1.5)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_description(_('The style used for additional detail data.'))
        default_style.add_paragraph_style("DDRI-MoreDetails", para)

        tbl = TableStyle()
        tbl.set_width(100)
        tbl.set_columns(3)
        tbl.set_column_width(0, 33)
        tbl.set_column_width(1, 33)
        tbl.set_column_width(2, 34)
        default_style.add_table_style("DDRI-GalleryTable", tbl)

        cell = TableCellStyle()
        cell.set_top_border(1)
        cell.set_bottom_border(1)
        default_style.add_cell_style("DDRI-TableHead", cell)

        font = FontStyle()
        font.set_bold(1)
        font.set_type_face(FONT_SANS_SERIF)
        font.set_size(12)
        font.set_italic(1)
        para = ParagraphStyle()
        para.set_font(font)
        para.set_top_margin(ReportUtils.pt2cm(3))
        para.set_bottom_margin(ReportUtils.pt2cm(3))
        para.set_description(_("The style used for image labels."))
        default_style.add_paragraph_style("DDRI-TableTitle", para)

        cell = TableCellStyle()
        default_style.add_cell_style("DDRI-NormalCell", cell)

        font = FontStyle()
        font.set_size(8)
        para = ParagraphStyle()
        para.set_alignment(PARA_ALIGN_CENTER)
        para.set_font(font)
        para.set_top_margin(ReportUtils.pt2cm(3))
        para.set_bottom_margin(ReportUtils.pt2cm(3))
        para.set_description(_('A style used for image captions.'))
        default_style.add_paragraph_style("DDRI-ImageCaptionCenter", para)

        endnotes.add_endnote_styles(default_style)
|
gramps-project/addons-source
|
DetDescendantReport-images/detdescendantreporti.py
|
Python
|
gpl-2.0
| 55,465
|
[
"Brian"
] |
98db80548dd353f94220ff96283f3f4688f8d3687e8c2e3da3fc2d01d311d70c
|
from setuptools import setup

# Long description shown on PyPI; kept identical to the short description
# plus a trailing newline.
LONG_DESCRIPTION = """\
Bridge PortAudio, IPython interactive widgets, and DSP callbacks
"""

setup(
    name='ipyaudio',
    version='0.0',
    description='Bridge PortAudio, IPython interactive widgets, and DSP callbacks',
    long_description=LONG_DESCRIPTION,
    author='Brian McFee',
    author_email='brian.mcfee@nyu.edu',
    url='http://github.com/bmcfee/ipyaudio',
    download_url='http://github.com/bmcfee/ipyaudio/releases',
    packages=['ipyaudio'],
    keywords='audio realtime interactive ipython',
    license='MIT',
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
    ],
    install_requires=[
        'IPython >= 2.0',
        'PyAudio >= 0.2.8',
        'numpy'
    ],
)
|
bmcfee/ipyaudio
|
setup.py
|
Python
|
mit
| 903
|
[
"Brian"
] |
fdb65b1f8e2c769e3b2bb64d0acb275118381e89f52ef69ea44b4a814ec4404b
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import math
import time
import itertools
import functools
import collections
import sys
import platform
import warnings
import re
from functools import reduce
import threading
import six
import vaex.utils
# import vaex.image
import numpy as np
import concurrent.futures
import numbers
import pyarrow as pa
from vaex.utils import Timer
import vaex.events
# import vaex.ui.undo
import vaex.grids
import vaex.multithreading
import vaex.promise
import vaex.execution
import vaex.expresso
import logging
import vaex.kld
from . import selections, tasks, scopes
from .expression import expression_namespace
from .delayed import delayed, delayed_args, delayed_list
from .column import Column, ColumnIndexed, ColumnSparse, ColumnString, ColumnConcatenatedLazy, supported_column_types
from . import array_types
import vaex.events
# py2/p3 compatibility
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
# Module-wide configuration constants.
_DEBUG = os.environ.get('VAEX_DEBUG', False)  # extra sanity checks that might hit performance
DEFAULT_REPR_FORMAT = 'plain'
FILTER_SELECTION_NAME = '__filter__'  # reserved selection name that backs DataFrame filtering
sys_is_le = sys.byteorder == 'little'  # True on little-endian platforms
logger = logging.getLogger("vaex")
lock = threading.Lock()
default_shape = 128  # default number of bins per dimension for binned statistics
default_chunk_size = 1_000_000
# executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
# executor = vaex.execution.default_executor
def _len(o):
    """Return ``o.__len__()`` directly.

    NOTE(review): bypasses the ``len()`` builtin — presumably to avoid its
    C ``ssize_t`` range check for very large datasets; confirm before
    replacing with ``len(o)``.
    """
    return o.__len__()
def _requires(name):
def wrap(*args, **kwargs):
raise RuntimeError('this function is wrapped by a placeholder, you probably want to install vaex-' + name)
return wrap
from .utils import (_ensure_strings_from_expressions,
_ensure_string_from_expression,
_ensure_list,
_is_limit,
_isnumber,
_issequence,
_is_string,
_parse_reduction,
_parse_n,
_normalize_selection_name,
_normalize,
_parse_f,
_expand,
_expand_shape,
_expand_limits,
as_flat_float,
as_flat_array,
_split_and_combine_mask)
# Process-wide executor singleton; created lazily by get_main_executor().
main_executor = None  # vaex.execution.Executor(vaex.multithreading.pool)
from vaex.execution import Executor
def get_main_executor():
    """Return the process-wide :class:`ExecutorLocal`, creating it on first use.

    NOTE(review): creation is not guarded by a lock; two threads calling this
    concurrently on first use could race and create two executors.
    """
    global main_executor
    if main_executor is None:
        main_executor = vaex.execution.ExecutorLocal(vaex.multithreading.get_main_pool())
    return main_executor
# we import after function_mapping is defined
from .expression import Expression
_doc_snippets = {}
_doc_snippets["expression"] = "expression or list of expressions, e.g. df.x, 'x', or ['x, 'y']"
_doc_snippets["expression_one"] = "expression in the form of a string, e.g. 'x' or 'x+y' or vaex expression object, e.g. df.x or df.x+df.y "
_doc_snippets["expression_single"] = "if previous argument is not a list, this argument should be given"
_doc_snippets["binby"] = "List of expressions for constructing a binned grid"
_doc_snippets["limits"] = """description for the min and max values for the expressions, e.g. 'minmax' (default), '99.7%', [0, 10], or a list of, e.g. [[0, 10], [0, 20], 'minmax']"""
_doc_snippets["shape"] = """shape for the array where the statistic is calculated on, if only an integer is given, it is used for all dimensions, e.g. shape=128, shape=[128, 256]"""
_doc_snippets["percentile_limits"] = """description for the min and max values to use for the cumulative histogram, should currently only be 'minmax'"""
_doc_snippets["percentile_shape"] = """shape for the array where the cumulative histogram is calculated on, integer type"""
_doc_snippets["selection"] = """Name of selection to use (or True for the 'default'), or all the data (when selection is None or False), or a list of selections"""
_doc_snippets["selection1"] = """Name of selection to use (or True for the 'default'), or all the data (when selection is None or False)"""
_doc_snippets["delay"] = """Do not return the result, but a proxy for delayhronous calculations (currently only for internal use)"""
_doc_snippets["progress"] = """A callable that takes one argument (a floating point value between 0 and 1) indicating the progress, calculations are cancelled when this callable returns False"""
_doc_snippets["expression_limits"] = _doc_snippets["expression"]
_doc_snippets["grid"] = """If grid is given, instead if compuation a statistic given by what, use this Nd-numpy array instead, this is often useful when a custom computation/statistic is calculated, but you still want to use the plotting machinery."""
_doc_snippets["edges"] = """Currently for internal use only (it includes nan's and values outside the limits at borders, nan and 0, smaller than at 1, and larger at -1"""
_doc_snippets["healpix_expression"] = """Expression which maps to a healpix index, for the Gaia catalogue this is for instance 'source_id/34359738368', other catalogues may simply have a healpix column."""
_doc_snippets["healpix_max_level"] = """The healpix level associated to the healpix_expression, for Gaia this is 12"""
_doc_snippets["healpix_level"] = """The healpix level to use for the binning, this defines the size of the first dimension of the grid."""
_doc_snippets["return_stat_scalar"] = """Numpy array with the given shape, or a scalar when no binby argument is given, with the statistic"""
_doc_snippets["return_limits"] = """List in the form [[xmin, xmax], [ymin, ymax], .... ,[zmin, zmax]] or [xmin, xmax] when expression is not a list"""
_doc_snippets["cov_matrix"] = """List all convariance values as a double list of expressions, or "full" to guess all entries (which gives an error when values are not found), or "auto" to guess, but allow for missing values"""
_doc_snippets['propagate_uncertainties'] = """If true, will propagate errors for the new virtual columns, see :meth:`propagate_uncertainties` for details"""
_doc_snippets['note_copy'] = '.. note:: Note that no copy of the underlying data is made, only a view/reference is made.'
_doc_snippets['note_filter'] = '.. note:: Note that filtering will be ignored (since they may change), you may want to consider running :meth:`extract` first.'
_doc_snippets['inplace'] = 'Make modifications to self or return a new DataFrame'
_doc_snippets['return_shallow_copy'] = 'Returns a new DataFrame with a shallow copy/view of the underlying data'
_doc_snippets['chunk_size'] = 'Return an iterator with cuts of the object in lenght of this size'
_doc_snippets['chunk_size_export'] = 'Number of rows to be written to disk in a single iteration'
_doc_snippets['evaluate_parallel'] = 'Evaluate the (virtual) columns in parallel'
_doc_snippets['array_type'] = 'Type of output array, possible values are None/"numpy" (ndarray), "xarray" for a xarray.DataArray, or "list" for a Python list'
_doc_snippets['ascii'] = 'Transform only ascii characters (usually faster).'
def docsubst(f):
    """Decorator: substitute the shared ``_doc_snippets`` placeholders into *f*'s docstring."""
    doc = f.__doc__
    if doc:
        f.__doc__ = doc.format(**_doc_snippets)
    return f
_functions_statistics_1d = []
def stat_1d(f):
_functions_statistics_1d.append(f)
return f
def _hidden(meth):
"""Mark a method as hidden"""
meth.__hidden__ = True
return meth
class DataFrame(object):
"""All local or remote datasets are encapsulated in this class, which provides a pandas
like API to your dataset.
Each DataFrame (df) has a number of columns, and a number of rows, the length of the DataFrame.
All DataFrames have multiple 'selection', and all calculations are done on the whole DataFrame (default)
or for the selection. The following example shows how to use the selection.
>>> df.select("x < 0")
>>> df.sum(df.y, selection=True)
>>> df.sum(df.y, selection=[df.x < 0, df.x > 0])
:type signal_selection_changed: events.Signal
:type executor: Executor
"""
def __init__(self, name=None, executor=None):
    """Initialize the empty DataFrame state (no columns yet).

    :param name: optional name for this DataFrame
    :param executor: task executor; defaults to the shared process-wide
        executor (see :func:`get_main_executor`)
    """
    self.name = name
    self.column_names = []
    self.executor = executor or get_main_executor()
    # signals other components can subscribe to
    self.signal_pick = vaex.events.Signal("pick")
    self.signal_sequence_index_change = vaex.events.Signal("sequence index change")
    self.signal_selection_changed = vaex.events.Signal("selection changed")
    self.signal_active_fraction_changed = vaex.events.Signal("active fraction changed")
    self.signal_column_changed = vaex.events.Signal("a column changed")  # (df, column_name, change_type=["add", "remove", "change"])
    self.signal_variable_changed = vaex.events.Signal("a variable changed")
    self.variables = {}
    self.virtual_columns = {}
    # we also store the virtual columns as expressions, for performance reasons
    # the expression object can cache the ast, making renaming/rewriting faster
    self._virtual_expressions = {}
    self.functions = {}
    # length bookkeeping; None until the concrete subclass fills them in
    self._length_original = None
    self._length_unfiltered = None
    self._cached_filtered_length = None
    self._active_fraction = 1
    self._current_row = None
    self._index_start = 0
    self._index_end = None
    # metadata (descriptions, units, UCDs) keyed by column name
    self.description = None
    self.ucds = {}
    self.units = {}
    self.descriptions = {}
    self.favorite_selections = {}
    self.mask = None  # a bitmask for the selection does not work for server side
    # maps from name to list of Selection objects
    self.selection_histories = collections.defaultdict(list)
    # after an undo, the last one in the history list is not the active one, -1 means no selection
    self.selection_history_indices = collections.defaultdict(lambda: -1)
    assert self.filtered is False
    self._auto_fraction = False
    self._sparse_matrices = {}  # record which sparse columns belong to which sparse matrix
    self._categories = {}
    self._selection_mask_caches = collections.defaultdict(dict)
    self._selection_masks = {}  # maps to vaex.superutils.Mask object
    self._renamed_columns = []
    # weak refs of expression that we keep to rewrite expressions
    self._expressions = []
    self.local = threading.local()
    # a check to avoid nested aggregator calls, which make stack traces very difficult
    # like the ExecutorLocal.local.executing, this needs to be thread local
    self.local._aggregator_nest_count = 0
    self._task_aggs = {}
    self._binners = {}
    self._grids = {}
def __getattr__(self, name):
    """Fallback attribute lookup that exposes entries of ``__hidden__``.

    Only invoked when normal attribute lookup fails. NOTE(review):
    ``__hidden__`` is presumably a class-level mapping of name -> method
    populated elsewhere (see the ``_hidden`` decorator) — confirm; if it is
    missing, this recurses.
    """
    # will support the hidden methods
    if name in self.__hidden__:
        # bind the stored descriptor to this instance
        return self.__hidden__[name].__get__(self)
    else:
        return object.__getattribute__(self, name)
def _ipython_key_completions_(self):
    """IPython hook: tab-complete ``df[<tab>`` with the column names."""
    return self.get_column_names()
@property
def func(self):
    """Namespace object exposing the registered expression functions.

    Each attribute is a wrapper that, when called, builds a textual
    :class:`Expression` of the form ``name(arg1, arg2, kw=...)`` instead of
    evaluating anything eagerly.
    """
    class Functions(object):
        pass
    functions = Functions()
    for name, value in expression_namespace.items():
        # f = vaex.expression.FunctionBuiltin(self, name)
        def closure(name=name, value=value):
            # default-argument binding avoids the late-binding-closure pitfall
            local_name = name
            def wrap(*args, **kwargs):
                def myrepr(k):
                    # render one argument as the textual form used inside an expression
                    if isinstance(k, Expression):
                        return str(k)
                    elif isinstance(k, np.ndarray) and k.ndim == 0:
                        # to support numpy scalars
                        return myrepr(k.item())
                    elif isinstance(k, np.ndarray):
                        # to support numpy arrays: stored as a variable, referenced by name
                        var = self.add_variable('arg_numpy_array', k, unique=True)
                        return var
                    elif isinstance(k, list):
                        # to support lists (rendered element-wise)
                        return '[' + ', '.join(myrepr(i) for i in k) + ']'
                    else:
                        return repr(k)
                arg_string = ", ".join([myrepr(k) for k in args] + ['{}={}'.format(name, myrepr(value)) for name, value in kwargs.items()])
                expression = "{}({})".format(local_name, arg_string)
                return vaex.expression.Expression(self, expression)
            return wrap
        f = closure()
        try:
            f = functools.wraps(value)(f)
        except AttributeError:
            pass  # python2 quirks.. ?
        setattr(functions, name, f)
    # user-registered functions take precedence over the builtin namespace
    for name, value in self.functions.items():
        setattr(functions, name, value)
    return functions
@_hidden
@vaex.utils.deprecated('use is_category')
def iscategory(self, column):
    """Deprecated alias for :meth:`is_category`."""
    return self.is_category(column)
def is_datetime(self, expression):
    """Return True when *expression* evaluates to a numpy datetime64 dtype."""
    kind = self.data_type(expression)
    if not isinstance(kind, np.dtype):
        return False
    return kind.kind == 'M'
def is_string(self, expression):
    """Return True when *expression* evaluates to a string type (numpy or arrow)."""
    return vaex.array_types.is_string_type(self.data_type(expression))
def is_category(self, column):
    """Returns true if column is a category."""
    column = _ensure_string_from_expression(column)
    # TODO: we don't support DictionaryType for remote dataframes
    if self.is_local() and column in self.columns:
        # TODO: we don't support categories as expressions
        x = self.columns[column]
        if isinstance(x, (pa.Array, pa.ChunkedArray)):
            arrow_type = x.type
            if isinstance(arrow_type, pa.DictionaryType):
                # arrow dictionary-encoded columns count as categorical
                return True
    # fall back to the explicitly registered categories
    return column in self._categories
def category_labels(self, column, aslist=True):
    """Return the labels of a categorical *column*.

    Looks first in the explicit ``_categories`` registry, then falls back to
    an arrow dictionary-encoded column. NOTE(review): implicitly returns
    None when the column is found in neither place.

    :param aslist: convert the arrow dictionary to a Python list
    """
    column = _ensure_string_from_expression(column)
    if column in self._categories:
        return self._categories[column]['labels']
    if column in self.columns:
        x = self.columns[column]
        arrow_type = x.type
        # duplicate code in array_types.py
        if isinstance(arrow_type, pa.DictionaryType):
            # we're interested in the type of the dictionary or the indices?
            if isinstance(x, pa.ChunkedArray):
                # take the first dictionary
                x = x.chunks[0]
            dictionary = x.dictionary
            if aslist:
                dictionary = dictionary.to_pylist()
            return dictionary
def category_values(self, column):
    """Return the registered category values for *column* (from ``_categories``)."""
    key = _ensure_string_from_expression(column)
    return self._categories[key]['values']
def category_count(self, column):
    """Return the number of categories (``'N'``) registered for *column*."""
    key = _ensure_string_from_expression(column)
    return self._categories[key]['N']
def category_offset(self, column):
    """Return the minimum value (``'min_value'``) registered for categorical *column*."""
    key = _ensure_string_from_expression(column)
    return self._categories[key]['min_value']
def execute(self):
    '''Execute all delayed jobs.'''
    # local import — NOTE(review): presumably to avoid a module-level import cycle; confirm
    from .asyncio import just_run
    just_run(self.execute_async())
async def execute_async(self):
    '''Async version of execute: runs all tasks scheduled on the executor.'''
    # no need to clear _task_aggs anymore, since they will be removed for the executors' task list
    await self.executor.execute_async()
@property
def filtered(self):
    """True when the reserved filter selection (``FILTER_SELECTION_NAME``) is active."""
    return self.has_selection(FILTER_SELECTION_NAME)
def map_reduce(self, map, reduce, arguments, progress=False, delay=False, info=False, to_numpy=True, ignore_filter=False, pre_filter=False, name='map reduce (custom)', selection=None):
    """Schedule a custom map/reduce task over the columns in *arguments*.

    :param map: callable applied per chunk (gets thread/range info when ``info=True``)
    :param reduce: callable combining two map results
    :param delay: when truthy, return the task instead of the result
    """
    # def map_wrapper(*blocks):
    task = tasks.TaskMapReduce(self, arguments, map, reduce, info=info, to_numpy=to_numpy, ignore_filter=ignore_filter, selection=selection, pre_filter=pre_filter)
    progressbar = vaex.utils.progressbars(progress)
    progressbar.add_task(task, name)
    self.executor.schedule(task)
    return self._delay(delay, task)
def apply(self, f, arguments=None, dtype=None, delay=False, vectorize=False):
    """Apply a function on a per row basis across the entire DataFrame.

    Example:

    >>> import vaex
    >>> df = vaex.example()
    >>> def func(x, y):
    ...     return (x+y)/(x-y)
    ...
    >>> df.apply(func, arguments=[df.x, df.y])
    Expression = lambda_function(x, y)
    Length: 330,000 dtype: float64 (expression)
    -------------------------------------------
    0 -0.460789
    1 3.90038
    2 -0.642851
    3 0.685768
    4 -0.543357

    :param f: The function to be applied
    :param arguments: List of arguments to be passed on to the function f.
    :param vectorize: when true, f is called with whole chunks instead of per-row scalars
    :return: A function that is lazily evaluated.
    """
    # NOTE(review): using assert for argument validation is stripped under -O
    assert arguments is not None, 'for now, you need to supply arguments'
    import types
    if isinstance(f, types.LambdaType):
        name = 'lambda_function'
    else:
        name = f.__name__
    if not vectorize:
        # wrap so the function is invoked per scalar row instead of per chunk
        f = vaex.expression.FunctionToScalar(f)
    lazy_function = self.add_function(name, f, unique=True)
    arguments = _ensure_strings_from_expressions(arguments)
    return lazy_function(*arguments)
def nop(self, expression, progress=False, delay=False):
    """Evaluate *expression* and discard the result.

    Useful for benchmarking, since vaex is usually lazy.
    """
    expression = _ensure_string_from_expression(expression)

    def _discard(chunk):
        pass

    def _combine(left, right):
        pass

    return self.map_reduce(_discard, _combine, [expression], delay=delay, progress=progress, name='nop', to_numpy=False)
def _set(self, expression, progress=False, selection=None, delay=False):
    """Build an ordered hash set of the values of *expression*.

    Runs a map/reduce over the data, filling one set per worker thread and
    merging them at the end. Returns the merged set object.
    """
    column = _ensure_string_from_expression(expression)
    columns = [column]
    from .hash import ordered_set_type_from_dtype
    from vaex.column import _to_string_sequence
    transient = self[str(expression)].transient or self.filtered or self.is_masked(expression)
    if self.is_string(expression) and not transient:
        # string is a special case, only ColumnString are not transient
        ar = self.columns[str(expression)]
        if not isinstance(ar, ColumnString):
            transient = True
    dtype = self.data_type(column)
    ordered_set_type = ordered_set_type_from_dtype(dtype, transient)
    # one set per worker thread, created lazily inside map
    sets = [None] * self.executor.thread_pool.nthreads
    def map(thread_index, i1, i2, ar):
        if sets[thread_index] is None:
            sets[thread_index] = ordered_set_type()
        if vaex.array_types.is_string_type(dtype):
            previous_ar = ar
            ar = _to_string_sequence(ar)
            if not transient:
                assert ar is previous_ar.string_sequence
        if np.ma.isMaskedArray(ar):
            mask = np.ma.getmaskarray(ar)
            sets[thread_index].update(ar, mask)
        else:
            sets[thread_index].update(ar)
    def reduce(a, b):
        pass
    self.map_reduce(map, reduce, columns, delay=delay, name='set', info=True, to_numpy=False, selection=selection)
    sets = [k for k in sets if k is not None]
    set0 = sets[0]
    # merge the per-thread sets into the first one
    for other in sets[1:]:
        set0.merge(other)
    return set0
def _index(self, expression, progress=False, delay=False):
    """Build a hash index (value -> row position) for *expression*.

    Like :meth:`_set`, but stores row offsets (``i1``) so values can be
    looked up by position; per-thread indices are merged at the end.
    """
    column = _ensure_string_from_expression(expression)
    # TODO: this does not seem needed
    # column = vaex.utils.valid_expression(self.dataset, column)
    columns = [column]
    from .hash import index_type_from_dtype
    from vaex.column import _to_string_sequence
    transient = self[column].transient or self.filtered or self.is_masked(column)
    if self.is_string(expression) and not transient:
        # string is a special case, only ColumnString are not transient
        ar = self.columns[str(self[column].expand())]
        if not isinstance(ar, ColumnString):
            transient = True
    dtype = self.data_type(column)
    index_type = index_type_from_dtype(dtype, transient)
    # one index per worker thread, created lazily inside map
    index_list = [None] * self.executor.thread_pool.nthreads
    def map(thread_index, i1, i2, ar):
        if index_list[thread_index] is None:
            index_list[thread_index] = index_type()
        if vaex.array_types.is_string_type(dtype):
            previous_ar = ar
            ar = _to_string_sequence(ar)
            if not transient:
                assert ar is previous_ar.string_sequence
        if np.ma.isMaskedArray(ar):
            mask = np.ma.getmaskarray(ar)
            index_list[thread_index].update(ar, mask, i1)
        else:
            index_list[thread_index].update(ar, i1)
    def reduce(a, b):
        pass
    self.map_reduce(map, reduce, columns, delay=delay, name='index', info=True, to_numpy=False)
    index_list = [k for k in index_list if k is not None]
    index0 = index_list[0]
    # merge the per-thread indices into the first one
    for other in index_list[1:]:
        index0.merge(other)
    return index0
def unique(self, expression, return_inverse=False, dropna=False, dropnan=False, dropmissing=False, progress=False, selection=None, delay=False):
    """Return the unique values of *expression*.

    :param return_inverse: also return an int64 array mapping each row to
        the ordinal of its unique value
    :param dropna: shorthand for ``dropnan`` and ``dropmissing`` together
    :param dropnan: exclude NaN from the result
    :param dropmissing: exclude missing/null values from the result
    """
    if dropna:
        dropnan = True
        dropmissing = True
    expression = _ensure_string_from_expression(expression)
    ordered_set = self._set(expression, progress=progress, selection=selection)
    transient = True
    if return_inverse:
        # inverse type can be smaller, depending on length of set
        inverse = np.zeros(self._length_unfiltered, dtype=np.int64)
        dtype = self.data_type(expression)
        from vaex.column import _to_string_sequence
        def map(thread_index, i1, i2, ar):
            if vaex.array_types.is_string_type(dtype):
                previous_ar = ar
                ar = _to_string_sequence(ar)
                if not transient:
                    assert ar is previous_ar.string_sequence
            # TODO: what about masked values?
            inverse[i1:i2] = ordered_set.map_ordinal(ar)
        def reduce(a, b):
            pass
        self.map_reduce(map, reduce, [expression], delay=delay, name='unique_return_inverse', info=True, to_numpy=False, selection=selection)
    keys = ordered_set.keys()
    # optionally re-attach NaN / missing sentinels at the front of the keys
    if not dropnan:
        if ordered_set.has_nan:
            keys = [np.nan] + keys
    if self.is_string(expression):
        if not dropmissing:
            if ordered_set.has_null:
                # arrow handles None as missing
                keys = [None] + keys
        keys = pa.array(keys)
    else:
        masked = False
        if not dropmissing:
            if ordered_set.has_null:
                masked = True
                keys = [np.ma.core.MaskedConstant()] + keys
        keys = np.ma.asarray(keys) if masked else np.asarray(keys)
    if return_inverse:
        return keys, inverse
    else:
        return keys
@docsubst
def mutual_information(self, x, y=None, mi_limits=None, mi_shape=256, binby=[], limits=None, shape=default_shape, sort=False, selection=False, delay=False):
    """Estimate the mutual information between x and y on a grid with shape mi_shape and mi_limits, possibly on a grid defined by binby.

    If sort is True, the mutual information is returned in sorted (descending) order and the list of expressions is returned in the same order.

    Example:

    >>> df.mutual_information("x", "y")
    array(0.1511814526380327)
    >>> df.mutual_information([["x", "y"], ["x", "z"], ["E", "Lz"]])
    array([ 0.15118145, 0.18439181, 1.07067379])
    >>> df.mutual_information([["x", "y"], ["x", "z"], ["E", "Lz"]], sort=True)
    (array([ 1.07067379, 0.18439181, 0.15118145]),
    [['E', 'Lz'], ['x', 'z'], ['x', 'y']])

    :param x: {expression}
    :param y: {expression}
    :param limits: {limits}
    :param shape: {shape}
    :param binby: {binby}
    :param sort: return mutual information in sorted (descending) order, and also return the corresponding list of expressions when sorted is True
    :param selection: {selection}
    :param delay: {delay}
    :return: {return_stat_scalar},
    """
    # normalize x (and y) into a list of expression pairs
    if y is None:
        waslist, [x, ] = vaex.utils.listify(x)
    else:
        waslist, [x, y] = vaex.utils.listify(x, y)
        x = list(zip(x, y))
        if mi_limits:
            mi_limits = [mi_limits]
    # print("x, mi_limits", x, mi_limits)
    limits = self.limits(binby, limits, delay=True)
    # print("$"*80)
    mi_limits = self.limits(x, mi_limits, delay=True)
    # print("@"*80)

    @delayed
    def calculate(counts):
        # TODO: mutual information doesn't take axis arguments, so ugly solution for now
        counts = counts.astype(np.float64)
        fullshape = _expand_shape(shape, len(binby))
        out = np.zeros((fullshape), dtype=float)
        if len(fullshape) == 0:
            out = vaex.kld.mutual_information(counts)
            # print("count> ", np.sum(counts))
        elif len(fullshape) == 1:
            for i in range(fullshape[0]):
                out[i] = vaex.kld.mutual_information(counts[..., i])
                # print("counti> ", np.sum(counts[...,i]))
            # print("countt> ", np.sum(counts))
        elif len(fullshape) == 2:
            for i in range(fullshape[0]):
                for j in range(fullshape[1]):
                    out[i, j] = vaex.kld.mutual_information(counts[..., i, j])
        elif len(fullshape) == 3:
            for i in range(fullshape[0]):
                for j in range(fullshape[1]):
                    for k in range(fullshape[2]):
                        out[i, j, k] = vaex.kld.mutual_information(counts[..., i, j, k])
        else:
            raise ValueError("binby with dim > 3 is not yet supported")
        return out

    @delayed
    def has_limits(limits, mi_limits):
        if not _issequence(binby):
            limits = [list(limits)]
        values = []
        for expressions, expression_limits in zip(x, mi_limits):
            # print("mi for", expressions, expression_limits)
            # total_shape = _expand_shape(mi_shape, len(expressions)) + _expand_shape(shape, len(binby))
            total_shape = _expand_shape(mi_shape, len(expressions)) + _expand_shape(shape, len(binby))
            # print("expressions", expressions)
            # print("total_shape", total_shape)
            # print("limits", limits,expression_limits)
            # print("limits>", list(limits) + list(expression_limits))
            counts = self.count(binby=list(expressions) + list(binby), limits=list(expression_limits) + list(limits),
                                shape=total_shape, delay=True, selection=selection)
            values.append(calculate(counts))
        return values

    @delayed
    def finish(mi_list):
        if sort:
            mi_list = np.array(mi_list)
            indices = np.argsort(mi_list)[::-1]
            sorted_x = list([x[k] for k in indices])
            return mi_list[indices], sorted_x
        else:
            return np.array(vaex.utils.unlistify(waslist, mi_list))
    values = finish(delayed_list(has_limits(limits, mi_limits)))
    return self._delay(delay, values)
def bin_edges(self, expression, limits, shape=default_shape):
    """Return the ``shape + 1`` bin edges for *expression* over *limits*.

    Convenience wrapper around :meth:`bins` with ``edges=True``.
    """
    edges = self.bins(expression, limits, shape=shape, edges=True)
    return edges
def bin_centers(self, expression, limits, shape=default_shape):
    """Return the ``shape`` bin centers for *expression* over *limits*.

    Convenience wrapper around :meth:`bins` with ``edges=False``.
    """
    centers = self.bins(expression, limits, shape=shape, edges=False)
    return centers
def bins(self, expression, limits, shape=default_shape, edges=True):
    """Return bin edges or bin centers over the range given by *limits*.

    :param expression: expression the bins belong to (not used in the
        computation itself; kept for API symmetry with other methods).
    :param limits: pair ``(vmin, vmax)`` describing the binned range.
    :param shape: number of bins.
    :param edges: if True, return the ``shape + 1`` bin edges (both
        endpoints included); otherwise return the ``shape`` bin centers.
    :return: 1d numpy array with the edges or centers.
    """
    # The original unpacked (vmin, vmax) but then kept indexing `limits`;
    # use the unpacked names consistently (behavior unchanged).
    vmin, vmax = limits
    if edges:
        # shape+1 equally spaced edge values, endpoints included
        return np.ogrid[vmin:vmax:(shape + 1) * 1j]
    else:
        dx = (vmax - vmin) / shape
        # `shape` left edges, then shift by half a bin width to the centers
        bins = np.ogrid[vmin:vmax - dx:(shape) * 1j]
        return bins + dx / 2
def nearest_bin(self, value, limits, shape):
    """Return the index of the bin (defined by *limits* and *shape*) whose center is nearest to *value*."""
    centers = self.bins('', limits=limits, edges=False, shape=shape)
    index = np.argmin(np.abs(centers - value))
    # Was a stray debug print(); demoted to debug logging so normal use stays silent.
    logger.debug("nearest_bin: bins=%r value=%r index=%r", centers, value, index)
    return index
def _compute_agg(self, name, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, extra_expressions=None, array_type=None):
    """Shared implementation behind count/mean/sum/var/min/max/first.

    Builds (or reuses) a binning grid, attaches one aggregation operation per
    expression to a single task, schedules it, and post-processes the raw
    counts into the requested ``array_type`` ('xarray', 'list', 'numpy'/None).

    :param name: key into ``vaex.agg.aggregates`` selecting the aggregator.
    :param expression: expression(s) to aggregate; None or "*" means "rows".
    :param extra_expressions: extra expressions some aggregators need
        (e.g. the order expression for 'first').
    :return: delayed or materialized result, see ``delay``.
    """
    logger.debug("aggregate %s(%r, binby=%r, limits=%r)", name, expression, binby, limits)
    expression = _ensure_strings_from_expressions(expression)
    if extra_expressions:
        extra_expressions = _ensure_strings_from_expressions(extra_expressions)
    expression_waslist, [expressions, ] = vaex.utils.listify(expression)
    # Keep a stack trace around so a nested-aggregator error can show where
    # the previous aggregation was started.
    import traceback
    trace = ''.join(traceback.format_stack())
    for expression in expressions:
        if expression and expression != "*":
            self.validate_expression(expression)
    if not hasattr(self.local, '_aggregator_nest_count'):
        self.local._aggregator_nest_count = 0
    if self.local._aggregator_nest_count != 0:
        raise RuntimeError("nested aggregator call: \nlast trace:\n%s\ncurrent trace:\n%s" % (self.local.last_trace, trace))
    else:
        self.local.last_trace = trace
    if self.filtered and expression is not None:
        # When the dataframe is filtered, evaluating an expression (e.g. via
        # df.dtype) may need the filter cache, which could trigger a nested
        # df.count(). Fill the caches/masks up front to avoid that.
        # TODO: GET RID OF THIS
        len(self)  # fill caches and masks
    grid = self._create_grid(binby, limits, shape, selection=selection, delay=True)

    @delayed
    def compute(expression, grid, selection, edges, progressbar):
        # Guard against re-entrant aggregation while an aggregator is set up.
        self.local._aggregator_nest_count += 1
        try:
            if expression in ["*", None]:
                agg = vaex.agg.aggregates[name](selection=selection, edges=edges)
            else:
                if extra_expressions:
                    agg = vaex.agg.aggregates[name](expression, *extra_expressions, selection=selection, edges=edges)
                else:
                    agg = vaex.agg.aggregates[name](expression, selection=selection, edges=edges)
            task, new_task = self._get_task_agg(grid)
            agg_subtask = agg.add_operations(task)
            if new_task:
                # it is important we schedule the task after we add an operation
                # otherwise the task will fail to executor (has to have >= 1 operation)
                self.executor.schedule(task)
            progressbar.add_task(task, "%s for %s" % (name, expression))

            @delayed
            def finish(counts):
                return np.asarray(counts)
            return finish(agg_subtask)
        finally:
            self.local._aggregator_nest_count -= 1

    @delayed
    def finish(grid, *counts):
        if array_type == 'xarray':
            binners = grid.binners
            dims = [binner.expression for binner in binners]
            if expression_waslist:
                dims = ['expression'] + dims

            def to_coord(binner):
                # NOTE(review): falls through (returns None) for unknown
                # binner types — presumably only Ordinal/Scalar occur here.
                name = type(binner).__name__
                if name.startswith('BinnerOrdinal_'):
                    return self.category_labels(binner.expression)
                elif name.startswith('BinnerScalar_'):
                    return self.bin_centers(binner.expression, [binner.vmin, binner.vmax], binner.bins)
            coords = [to_coord(binner) for binner in binners]
            if expression_waslist:
                coords = [expressions] + coords
                counts = np.asarray(counts)
            else:
                counts = counts[0]
            import xarray
            return xarray.DataArray(counts, dims=dims, coords=coords)
        elif array_type == 'list':
            return vaex.utils.unlistify(expression_waslist, counts).tolist()
        elif array_type in [None, 'numpy']:
            return np.asarray(vaex.utils.unlistify(expression_waslist, counts))
        else:
            # BUG FIX: the original interpolated the builtin `format` instead
            # of the offending argument.
            raise RuntimeError(f'Unknown array_type {array_type}')
    progressbar = vaex.utils.progressbars(progress)
    stats = [compute(expression, grid, selection=selection, edges=edges, progressbar=progressbar) for expression in expressions]
    var = finish(grid, *stats)
    return self._delay(delay, var)
@docsubst
def count(self, expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, array_type=None):
    """Count the number of non-NaN values (or all rows when expression is None or "*").

    Example:

    >>> df.count()
    330000
    >>> df.count("*")
    330000.0
    >>> df.count("*", binby=["x"], shape=4)
    array([  10925.,  155427.,  152007.,   10748.])

    :param expression: Expression or column for which to count non-missing values, or None or '*' for counting the rows
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :param array_type: {array_type}
    :return: {return_stat_scalar}
    """
    # All the heavy lifting happens in the shared aggregation helper.
    return self._compute_agg('count', expression, binby=binby, limits=limits, shape=shape,
                             selection=selection, delay=delay, edges=edges,
                             progress=progress, array_type=array_type)
@delayed
def _first_calculation(self, expression, order_expression, binby, limits, shape, selection, edges, progressbar):
    """Internal helper for :meth:`first`: schedule a TaskStatistic with OP_FIRST.

    Returns a delayed numpy array with the first value of ``expression`` per
    bin, ordered by ``order_expression``.
    """
    # NOTE(review): when shape is truthy, `limits` is assumed to be a
    # (limits, shapes) pair as produced by self.limits(..., shape=shape) —
    # TODO confirm against the caller in first().
    if shape:
        limits, shapes = limits
    else:
        limits, shapes = limits, shape
    # Both the value and its ordering expression are passed as weights for OP_FIRST.
    task = tasks.TaskStatistic(self, binby, shapes, limits, weights=[expression, order_expression], op=tasks.OP_FIRST, selection=selection, edges=edges)
    self.executor.schedule(task)
    # NOTE(review): the label says "count" — looks copy-pasted from count();
    # it is runtime/display text, so left untouched here.
    progressbar.add_task(task, "count for %s" % expression)
    @delayed
    def finish(counts):
        # Materialize the task's result as a plain ndarray.
        counts = np.array(counts)
        return counts
    return finish(task)
@docsubst
def first(self, expression, order_expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None, array_type=None):
    """Return the first element of a binned `expression`, where the values each bin are sorted by `order_expression`.

    Example:

    >>> import vaex
    >>> df = vaex.example()
    >>> df.first(df.x, df.y, shape=8)
    >>> df.first(df.x, df.y, shape=8, binby=[df.y])
    array([-4.81883764, 11.65378    ,  9.70084476, -7.3025589 ,  4.84954977,
            8.47446537, -5.73602629, 10.18783   ])

    :param expression: The value to be placed in the bin.
    :param order_expression: Order the values in the bins by this expression.
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :param array_type: {array_type}
    :return: Ndarray containing the first elements.
    :rtype: numpy.array
    """
    # The order expression rides along as an extra expression for the 'first'
    # aggregator. (The original kept ~15 unreachable lines after this return,
    # a leftover of a pre-aggregation-framework implementation; removed.)
    return self._compute_agg('first', expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions=[order_expression], array_type=array_type)
@docsubst
@stat_1d
def mean(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):
    """Calculate the mean for expression, possibly on a grid defined by binby.

    Example:

    >>> df.mean("x")
    -0.067131491264005971
    >>> df.mean("(x**2+y**2)**0.5", binby="E", shape=4)
    array([  2.43483742,   4.41840721,   8.26742458,  15.53846476])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :param array_type: {array_type}
    :return: {return_stat_scalar}
    """
    # Delegates to the shared aggregation helper. (The original kept ~20
    # unreachable lines of a legacy TaskStatistic implementation after this
    # return; removed.)
    return self._compute_agg('mean', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
@delayed
def _sum_calculation(self, expression, binby, limits, shape, selection, progressbar):
    """Internal helper: schedule the statistic task backing a (legacy) sum and return the delayed sums."""
    task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_ADD_WEIGHT_MOMENTS_01, selection=selection)
    self.executor.schedule(task)
    progressbar.add_task(task, "sum for %s" % expression)

    @delayed
    def extract_sums(grid):
        # The last axis holds the [count, sum] moments; keep only the sums.
        return np.array(grid)[..., 1]
    return extract_sums(task)
@docsubst
@stat_1d
def sum(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):
    """Calculate the sum for the given expression, possible on a grid defined by binby

    Example:

    >>> df.sum("L")
    304054882.49378014
    >>> df.sum("L", binby="E", shape=4)
    array([  8.83517994e+06,   5.92217598e+07,   9.55218726e+07,
             1.40008776e+08])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :param array_type: {array_type}
    :return: {return_stat_scalar}
    """
    # Delegates to the shared aggregation helper. (The original kept ~11
    # unreachable lines using _sum_calculation after this return; removed.)
    return self._compute_agg('sum', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
@docsubst
@stat_1d
def std(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, array_type=None):
    """Calculate the standard deviation for the given expression, possible on a grid defined by binby

    >>> df.std("vz")
    110.31773397535071
    >>> df.std("vz", binby=["(x**2+y**2)**0.5"], shape=4)
    array([ 123.57954851,   85.35190177,   61.14345748,   38.0740619 ])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param array_type: {array_type}
    :return: {return_stat_scalar}
    """
    # std is simply the square root of the (delayed) variance.
    variance = self.var(expression, binby=binby, limits=limits, shape=shape,
                        selection=selection, delay=True, progress=progress)

    @delayed
    def to_std(var):
        return var ** 0.5
    return self._delay(delay, to_std(variance))
@docsubst
@stat_1d
def var(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, array_type=None):
    """Calculate the sample variance for the given expression, possible on a grid defined by binby

    Example:

    >>> df.var("vz")
    12170.002429456246
    >>> df.var("vz", binby=["(x**2+y**2)**0.5"], shape=4)
    array([ 15271.90481083,   7284.94713504,   3738.52239232,   1449.63418988])
    >>> df.var("vz", binby=["(x**2+y**2)**0.5"], shape=4)**0.5
    array([ 123.57954851,   85.35190177,   61.14345748,   38.0740619 ])
    >>> df.std("vz", binby=["(x**2+y**2)**0.5"], shape=4)
    array([ 123.57954851,   85.35190177,   61.14345748,   38.0740619 ])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param array_type: {array_type}
    :return: {return_stat_scalar}
    """
    # var has no meaningful bin edges; always computed without them.
    edges = False
    # Delegates to the shared aggregation helper. (The original kept ~24
    # unreachable lines of a legacy moments-based implementation after this
    # return; removed.)
    return self._compute_agg('var', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
@docsubst
def covar(self, x, y, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the covariance cov[x,y] between x and y, possibly on a grid defined by binby.

    Example:

    >>> df.covar("x**2+y**2+z**2", "-log(-E+1)")
    array(52.69461456005138)
    >>> df.covar("x**2+y**2+z**2", "-log(-E+1)")/(df.std("x**2+y**2+z**2") * df.std("-log(-E+1)"))
    0.63666373822156686
    >>> df.covar("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
    array([ 10.17387143,  51.94954078,  51.24902796,  20.2163929 ])

    :param x: {expression}
    :param y: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    @delayed
    def cov(mean_x, mean_y, mean_xy):
        # cov[x, y] = E[xy] - E[x] E[y]
        return mean_xy - mean_x * mean_y

    waslist, [xlist, ylist] = vaex.utils.listify(x, y)
    # print("limits", limits)
    limits = self.limits(binby, limits, selection=selection, delay=True)
    # print("limits", limits)

    @delayed
    def calculate(limits):
        # One delayed mean per x, y and x*y; cov() combines them per pair.
        # NOTE: `progressbar` is bound later below — safe because this
        # closure only runs once the delayed limits resolve.
        results = []
        for x, y in zip(xlist, ylist):
            mx = self.mean(x, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)
            my = self.mean(y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)
            cxy = self.mean("(%s)*(%s)" % (x, y), binby=binby, limits=limits, shape=shape, selection=selection,
                            delay=True, progress=progressbar)
            results.append(cov(mx, my, cxy))
        return results

    progressbar = vaex.utils.progressbars(progress)
    covars = calculate(limits)

    @delayed
    def finish(covars):
        # Collapse back to a scalar when a single (x, y) pair was given.
        value = np.array(vaex.utils.unlistify(waslist, covars))
        return value
    return self._delay(delay, finish(delayed_list(covars)))
@docsubst
def correlation(self, x, y=None, binby=[], limits=None, shape=default_shape, sort=False, sort_key=np.abs, selection=False, delay=False, progress=None):
    """Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.

    Example:

    >>> df.correlation("x**2+y**2+z**2", "-log(-E+1)")
    array(0.6366637382215669)
    >>> df.correlation("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
    array([ 0.40594394,  0.69868851,  0.61394099,  0.65266318])

    :param x: {expression}
    :param y: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param sort: if True, return the correlations sorted (descending by sort_key) together with the matching expression pairs
    :param sort_key: callable applied to the correlations before sorting (default: absolute value)
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    @delayed
    def corr(cov):
        # Normalize the 2x2 covariance matrix: cov01 / sqrt(cov00 * cov11)
        with np.errstate(divide='ignore', invalid='ignore'):  # these are fine, we are ok with nan's in vaex
            return cov[..., 0, 1] / (cov[..., 0, 0] * cov[..., 1, 1])**0.5

    if y is None:
        # x must then be a list of [x, y] pairs.
        if not isinstance(x, (tuple, list)):
            raise ValueError("if y not given, x is expected to be a list or tuple, not %r" % x)
        if _issequence(x) and not _issequence(x[0]) and len(x) == 2:
            # a single pair was given flat, e.g. ["a", "b"]; wrap it
            x = [x]
        if not(_issequence(x) and all([_issequence(k) and len(k) == 2 for k in x])):
            raise ValueError("if y not given, x is expected to be a list of lists with length 2, not %r" % x)
        # waslist, [xlist,ylist] = vaex.utils.listify(*x)
        waslist = True
        xlist, ylist = zip(*x)
        # print xlist, ylist
    else:
        waslist, [xlist, ylist] = vaex.utils.listify(x, y)
    xlist = _ensure_strings_from_expressions(xlist)
    ylist = _ensure_strings_from_expressions(ylist)
    limits = self.limits(binby, limits, selection=selection, delay=True)

    @delayed
    def echo(limits):
        # debug hook: logs the resolved limits once available
        logger.debug(">>>>>>>>: %r %r", limits, np.array(limits).shape)
    echo(limits)

    @delayed
    def calculate(limits):
        # One delayed covariance matrix per (x, y) pair, normalized by corr().
        results = []
        for x, y in zip(xlist, ylist):
            task = self.cov(x, y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True,
                            progress=progressbar)
            results.append(corr(task))
        return results

    progressbar = vaex.utils.progressbars(progress)
    correlations = calculate(limits)

    @delayed
    def finish(correlations):
        if sort:
            # Descending order by sort_key (default |corr|); also return the
            # expression pairs in the same order.
            correlations = np.array(correlations)
            indices = np.argsort(sort_key(correlations) if sort_key else correlations)[::-1]
            sorted_x = list([x[k] for k in indices])
            return correlations[indices], sorted_x
        value = np.array(vaex.utils.unlistify(waslist, correlations))
        return value
    return self._delay(delay, finish(delayed_list(correlations)))
@docsubst
def cov(self, x, y=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the covariance matrix for x and y or more expressions, possibly on a grid defined by binby.

    Either x and y are expressions, e.g.:

    >>> df.cov("x", "y")

    Or only the x argument is given with a list of expressions, e.g.:

    >>> df.cov(["x", "y", "z"])

    Example:

    >>> df.cov("x", "y")
    array([[ 53.54521742,  -3.8123135 ],
           [ -3.8123135 ,  60.62257881]])
    >>> df.cov(["x", "y", "z"])
    array([[ 53.54521742,  -3.8123135 ,  -0.98260511],
           [ -3.8123135 ,  60.62257881,   1.21381057],
           [ -0.98260511,   1.21381057,  25.55517638]])
    >>> df.cov("x", "y", binby="E", shape=2)
    array([[[  9.74852878e+00,  -3.02004780e-02],
            [ -3.02004780e-02,   9.99288215e+00]],
           [[  8.43996546e+01,  -6.51984181e+00],
            [ -6.51984181e+00,   9.68938284e+01]]])

    :param x: {expression}
    :param y: {expression_single}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}, the last dimensions are of shape (2,2)
    """
    selection = _ensure_strings_from_expressions(selection)
    if y is None:
        if not _issequence(x):
            # BUG FIX: the original passed x as a second exception arg
            # instead of formatting it into the message.
            raise ValueError("if y argument is not given, x is expected to be sequence, not %r" % (x,))
        expressions = x
    else:
        expressions = [x, y]
    binby = _ensure_list(binby)
    shape = _expand_shape(shape, len(binby))
    progressbar = vaex.utils.progressbars(progress)
    limits = self.limits(binby, limits, selection=selection, delay=True)

    @delayed
    def calculate(expressions, limits):
        # One task computes all counts/sums/cross-moments (OP_COV) at once.
        task = tasks.TaskStatistic(self, binby, shape, limits, weights=expressions, op=tasks.OP_COV, selection=selection)
        self.executor.schedule(task)
        progressbar.add_task(task, "covariance values for %r" % expressions)
        return task

    @delayed
    def finish(values):
        # Layout of the last axis of `values` (N = number of expressions):
        #   [0:N)            per-expression counts
        #   [N:2N)           per-expression sums
        #   [2N:2N+N^2)      pairwise counts
        #   [2N+N^2:)        pairwise sums of products
        N = len(expressions)
        counts = values[..., :N]
        sums = values[..., N:2 * N]
        with np.errstate(divide='ignore', invalid='ignore'):
            means = sums / counts
        # matrix of means * means.T
        meansxy = means[..., None] * means[..., None, :]
        counts = values[..., 2 * N:2 * N + N**2]
        sums = values[..., 2 * N + N**2:]
        shape = counts.shape[:-1] + (N, N)
        counts = counts.reshape(shape)
        sums = sums.reshape(shape)
        with np.errstate(divide='ignore', invalid='ignore'):
            moments2 = sums / counts
        # cov[i, j] = E[x_i x_j] - E[x_i] E[x_j]
        cov_matrix = moments2 - meansxy
        return cov_matrix
    # (The original created the progressbar twice and computed an unused
    # outer N; both removed.)
    values = calculate(expressions, limits)
    cov_matrix = finish(values)
    return self._delay(delay, cov_matrix)
@docsubst
@stat_1d
def minmax(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the minimum and maximum for expressions, possibly on a grid defined by binby.

    Example:

    >>> df.minmax("x")
    array([-128.293991,  271.365997])
    >>> df.minmax(["x", "y"])
    array([[-128.293991 ,  271.365997 ],
           [ -71.5523682,  146.465836 ]])
    >>> df.minmax("x", binby="x", shape=5, limits=[-10, 10])
    array([[-9.99919128, -6.00010443],
           [-5.99972439, -2.00002384],
           [-1.99991322,  1.99998057],
           [ 2.0000093 ,  5.99983597],
           [ 6.0004878 ,  9.99984646]])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}, the last dimension is of shape (2)
    """
    # vmin = self._compute_agg('min', expression, binby, limits, shape, selection, delay, edges, progress)
    # vmax = self._compute_agg('max', expression, binby, limits, shape, selection, delay, edges, progress)
    @delayed
    def calculate(expression, limits):
        # One OP_MIN_MAX statistic task per expression.
        task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_MIN_MAX, selection=selection)
        self.executor.schedule(task)
        progressbar.add_task(task, "minmax for %s" % expression)
        return task

    @delayed
    def finish(*minmax_list):
        # Cast the result back to the (shared) dtype of the input expressions.
        value = vaex.utils.unlistify(waslist, np.array(minmax_list))
        value = vaex.array_types.to_numpy(value)
        value = value.astype(vaex.array_types.to_numpy_type(data_type0))
        return value

    expression = _ensure_strings_from_expressions(expression)
    binby = _ensure_strings_from_expressions(binby)
    waslist, [expressions, ] = vaex.utils.listify(expression)
    column_names = self.get_column_names(hidden=True)
    expressions = [vaex.utils.valid_expression(column_names, k) for k in expressions]
    # All expressions must share one dtype since one ndarray holds the result.
    data_types = [self.data_type(expr) for expr in expressions]
    data_type0 = data_types[0]
    # special case that we supported mixed endianness for ndarrays
    all_same_kind = all(isinstance(data_type, np.dtype) for data_type in data_types) and all([k.kind == data_type0.kind for k in data_types])
    if not (all_same_kind or all([vaex.array_types.same_type(k, data_type0) for k in data_types])):
        raise TypeError("cannot mix different dtypes in 1 minmax call")
    progressbar = vaex.utils.progressbars(progress, name="minmaxes")
    limits = self.limits(binby, limits, selection=selection, delay=True)
    all_tasks = [calculate(expression, limits) for expression in expressions]
    result = finish(*all_tasks)
    return self._delay(delay, result)
@docsubst
@stat_1d
def min(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):
    """Calculate the minimum for given expressions, possibly on a grid defined by binby.

    Example:

    >>> df.min("x")
    array(-128.293991)
    >>> df.min(["x", "y"])
    array([-128.293991 ,  -71.5523682])
    >>> df.min("x", binby="x", shape=5, limits=[-10, 10])
    array([-9.99919128, -5.99972439, -1.99991322,  2.0000093 ,  6.0004878 ])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :param array_type: {array_type}
    :return: {return_stat_scalar}
    """
    # Delegates to the shared aggregation helper. (The original kept an
    # unreachable minmax-based fallback after this return, and its ":return:"
    # doc wrongly claimed a trailing (2)-shaped dimension; both fixed.)
    return self._compute_agg('min', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
@docsubst
@stat_1d
def max(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False, array_type=None):
    """Calculate the maximum for given expressions, possibly on a grid defined by binby.

    Example:

    >>> df.max("x")
    array(271.365997)
    >>> df.max(["x", "y"])
    array([ 271.365997,  146.465836])
    >>> df.max("x", binby="x", shape=5, limits=[-10, 10])
    array([-6.00010443, -2.00002384,  1.99998057,  5.99983597,  9.99984646])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :param edges: {edges}
    :param array_type: {array_type}
    :return: {return_stat_scalar}
    """
    # Delegates to the shared aggregation helper. (The original kept an
    # unreachable minmax-based fallback after this return, and its ":return:"
    # doc wrongly claimed a trailing (2)-shaped dimension; both fixed.)
    return self._compute_agg('max', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
@docsubst
@stat_1d
def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=256, percentile_limits="minmax", selection=False, delay=False):
    """Calculate the median, possibly on a grid defined by binby.

    NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by
    percentile_shape and percentile_limits

    :param expression: {expression}
    :param percentage: percentile to compute (default 50, i.e. the median)
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param percentile_limits: {percentile_limits}
    :param percentile_shape: {percentile_shape}
    :param selection: {selection}
    :param delay: {delay}
    :return: {return_stat_scalar}
    """
    # BUG FIX: the original hardcoded 50 here, silently ignoring the
    # `percentage` argument; pass it through (default unchanged, so existing
    # callers are unaffected).
    return self.percentile_approx(expression, percentage, binby=binby, limits=limits, shape=shape, percentile_shape=percentile_shape, percentile_limits=percentile_limits, selection=selection, delay=delay)
@docsubst
def percentile_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=1024, percentile_limits="minmax", selection=False, delay=False):
"""Calculate the percentile given by percentage, possibly on a grid defined by binby.
NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by
percentile_shape and percentile_limits.
Example:
>>> df.percentile_approx("x", 10), df.percentile_approx("x", 90)
(array([-8.3220355]), array([ 7.92080358]))
>>> df.percentile_approx("x", 50, binby="x", shape=5, limits=[-10, 10])
array([[-7.56462982],
[-3.61036641],
[-0.01296306],
[ 3.56697863],
[ 7.45838367]])
:param expression: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param percentile_limits: {percentile_limits}
:param percentile_shape: {percentile_shape}
:param selection: {selection}
:param delay: {delay}
:return: {return_stat_scalar}
"""
waslist, [expressions, ] = vaex.utils.listify(expression)
if not isinstance(binby, (tuple, list)):
binby = [binby]
else:
binby = binby
@delayed
def calculate(expression, shape, limits):
# task = TaskStatistic(self, [expression] + binby, shape, limits, op=OP_ADD1, selection=selection)
# self.executor.schedule(task)
# return task
return self.count(binby=list(binby) + [expression], shape=shape, limits=limits, selection=selection, delay=True, edges=True)
@delayed
def finish(percentile_limits, counts_list):
results = []
for i, counts in enumerate(counts_list):
counts = counts.astype(np.float)
# remove the nan and boundary edges from the first dimension,
nonnans = list([slice(2, -1, None) for k in range(len(counts.shape) - 1)])
nonnans.append(slice(1, None, None)) # we're gonna get rid only of the nan's, and keep the overflow edges
nonnans = tuple(nonnans)
cumulative_grid = np.cumsum(counts.__getitem__(nonnans), -1) # convert to cumulative grid
totalcounts = np.sum(counts.__getitem__(nonnans), -1)
empty = totalcounts == 0
original_shape = counts.shape
shape = cumulative_grid.shape # + (original_shape[-1] - 1,) #
counts = np.sum(counts, -1)
edges_floor = np.zeros(shape[:-1] + (2,), dtype=np.int64)
edges_ceil = np.zeros(shape[:-1] + (2,), dtype=np.int64)
# if we have an off # of elements, say, N=3, the center is at i=1=(N-1)/2
# if we have an even # of elements, say, N=4, the center is between i=1=(N-2)/2 and i=2=(N/2)
# index = (shape[-1] -1-3) * percentage/100. # the -3 is for the edges
waslist_percentage, [percentages, ] = vaex.utils.listify(percentage)
percentiles = []
for p in percentages:
if p == 0:
percentiles.append(percentile_limits[i][0])
continue
if p == 100:
percentiles.append(percentile_limits[i][1])
continue
values = np.array((totalcounts + 1) * p / 100.) # make sure it's an ndarray
values[empty] = 0
floor_values = np.array(np.floor(values))
ceil_values = np.array(np.ceil(values))
vaex.vaexfast.grid_find_edges(cumulative_grid, floor_values, edges_floor)
vaex.vaexfast.grid_find_edges(cumulative_grid, ceil_values, edges_ceil)
def index_choose(a, indices):
# alternative to np.choise, which doesn't like the last dim to be >= 32
# print(a, indices)
out = np.zeros(a.shape[:-1])
# print(out.shape)
for i in np.ndindex(out.shape):
# print(i, indices[i])
out[i] = a[i + (indices[i],)]
return out
def calculate_x(edges, values):
left, right = edges[..., 0], edges[..., 1]
left_value = index_choose(cumulative_grid, left)
right_value = index_choose(cumulative_grid, right)
with np.errstate(divide='ignore', invalid='ignore'):
u = np.array((values - left_value) / (right_value - left_value))
# TODO: should it really be -3? not -2
xleft, xright = percentile_limits[i][0] + (left - 0.5) * (percentile_limits[i][1] - percentile_limits[i][0]) / (shape[-1] - 3),\
percentile_limits[i][0] + (right - 0.5) * (percentile_limits[i][1] - percentile_limits[i][0]) / (shape[-1] - 3)
x = xleft + (xright - xleft) * u # /2
return x
x1 = calculate_x(edges_floor, floor_values)
x2 = calculate_x(edges_ceil, ceil_values)
u = values - floor_values
x = x1 + (x2 - x1) * u
percentiles.append(x)
percentile = vaex.utils.unlistify(waslist_percentage, np.array(percentiles))
results.append(percentile)
return results
shape = _expand_shape(shape, len(binby))
percentile_shapes = _expand_shape(percentile_shape, len(expressions))
if percentile_limits:
percentile_limits = _expand_limits(percentile_limits, len(expressions))
limits = self.limits(binby, limits, selection=selection, delay=True)
percentile_limits = self.limits(expressions, percentile_limits, selection=selection, delay=True)
@delayed
def calculation(limits, percentile_limits):
# print(">>>", expressions, percentile_limits)
# print(percentile_limits[0], list(percentile_limits[0]))
# print(list(np.array(limits).tolist()) + list(percentile_limits[0]))
# print("limits", limits, expressions, percentile_limits, ">>", list(limits) + [list(percentile_limits[0]))
tasks = [calculate(expression, tuple(shape) + (percentile_shape, ), list(limits) + [list(percentile_limit)])
for percentile_shape, percentile_limit, expression
in zip(percentile_shapes, percentile_limits, expressions)]
return finish(percentile_limits, delayed_args(*tasks))
# return tasks
result = calculation(limits, percentile_limits)
@delayed
def finish2(grid):
value = vaex.utils.unlistify(waslist, np.array(grid))
return value
return self._delay(delay, finish2(result))
def _use_delay(self, delay):
    """Return whether delayed (asynchronous, promise-based) execution was explicitly requested.

    NOTE(review): this is an equality check, not an identity check, so e.g.
    ``numpy.bool_(True)`` also counts as a request for delayed execution,
    while other truthy values (non-empty strings, 1.5, ...) do not
    necessarily behave like ``True`` here.  Confirm intent before changing
    to ``delay is True``.
    """
    return delay == True
def _delay(self, delay, task, progressbar=False):
if task.isRejected:
task.get()
if delay:
return task
else:
self.execute()
return task.get()
@docsubst
def limits_percentage(self, expression, percentage=99.73, square=False, selection=False, delay=False):
    """Calculate the [min, max] range for expression, containing approximately a percentage of the data as defined
    by percentage.

    The range is symmetric around the median, i.e., for a percentage of 90, this gives the same results as:

    Example:

    >>> df.limits_percentage("x", 90)
    array([-12.35081376,  12.14858052]
    >>> df.percentile_approx("x", 5), df.percentile_approx("x", 95)
    (array([-12.36813152]), array([ 12.13275818]))

    NOTE: this value is approximated by calculating the cumulative distribution on a grid.
    NOTE 2: The values above are not exactly the same, since percentile and limits_percentage do not share the same code

    :param expression: {expression_limits}
    :param float percentage: Value between 0 and 100
    :param delay: {delay}
    :return: {return_limits}
    """
    logger.info("limits_percentage for %r, with percentage=%r", expression, percentage)
    waslist, [expressions, ] = vaex.utils.listify(expression)
    limits = []
    for expr in expressions:
        limits_minmax = self.minmax(expr, selection=selection)
        vmin, vmax = limits_minmax
        size = 1024 * 16
        counts = self.count(binby=expr, shape=size, limits=limits_minmax, selection=selection)
        # normalized cumulative distribution on the grid (first entry 0)
        cumcounts = np.concatenate([[0], np.cumsum(counts)])
        cumcounts = cumcounts / cumcounts.max()
        # TODO: this is crude.. see the details!
        f = (1 - percentage / 100.) / 2  # mass to cut from each tail
        x = np.linspace(vmin, vmax, size + 1)
        # invert the CDF at both tail fractions; np.interp replaces the
        # long-removed scipy.interp alias (identical 1-d linear interpolation)
        limit = np.interp([f, 1 - f], cumcounts, x)
        limits.append(limit)
    return vaex.utils.unlistify(waslist, limits)
@docsubst
def limits(self, expression, value=None, square=False, selection=None, delay=False, shape=None):
    """Calculate the [min, max] range for expression, as described by value, which is 'minmax' by default.

    If value is a list of the form [minvalue, maxvalue], it is simply returned, this is for convenience when using mixed
    forms.

    Example:

    >>> import vaex
    >>> df = vaex.example()
    >>> df.limits("x")
    array([-128.293991,  271.365997])
    >>> df.limits("x", "99.7%")
    array([-28.86381927,  28.9261226 ])
    >>> df.limits(["x", "y"])
    (array([-128.293991,  271.365997]), array([ -71.5523682,  146.465836 ]))
    >>> df.limits(["x", "y"], "99.7%")
    (array([-28.86381927,  28.9261226 ]), array([-28.60476934,  28.96535249]))
    >>> df.limits(["x", "y"], ["minmax", "90%"])
    (array([-128.293991,  271.365997]), array([-13.37438402,  13.4224423 ]))
    >>> df.limits(["x", "y"], ["minmax", [0, 10]])
    (array([-128.293991,  271.365997]), [0, 10])

    :param expression: {expression_limits}
    :param value: {limits}
    :param selection: {selection}
    :param delay: {delay}
    :param shape: if given, also return the (possibly category-capped) shape per expression
    :return: {return_limits}
    """
    if expression == []:
        # empty request: mirror the shape of the normal return value
        return [] if shape is None else ([], [])
    waslist, [expressions, ] = vaex.utils.listify(expression)
    expressions = _ensure_strings_from_expressions(expressions)
    selection = _ensure_strings_from_expressions(selection)
    if value is None:
        value = "minmax"
    # broadcast a single limit spec over all expressions
    if _is_limit(value) or not _issequence(value):
        values = (value,) * len(expressions)
    else:
        values = value
    # we cannot hash arrow arrays
    values = [vaex.array_types.to_numpy(k) if isinstance(k, vaex.array_types.supported_arrow_array_types) else k for k in values]

    initial_expressions, initial_values = expressions, values
    expression_values = dict()
    expression_shapes = dict()
    # first pass: flatten (possibly nested) expression/value pairs, record
    # which (expression, value) combinations need limits computed and the
    # per-expression shape (capped at N for categorical columns)
    for i, (expression, value) in enumerate(zip(expressions, values)):
        if _issequence(expression):
            expressions = expression
            nested = True
        else:
            expressions = [expression]
            nested = False
        if _is_limit(value) or not _issequence(value):
            values = (value,) * len(expressions)
        else:
            values = value
        for j, (expression, value) in enumerate(zip(expressions, values)):
            if shape is not None:
                if _issequence(shape):
                    shapes = shape
                else:
                    shapes = (shape, ) * (len(expressions) if nested else len(initial_expressions))
            shape_index = j if nested else i
            if not _is_limit(value):
                expression_values[(expression, value)] = None
            if self.is_category(expression):
                N = self._categories[_ensure_string_from_expression(expression)]['N']
                expression_shapes[expression] = min(N, shapes[shape_index] if shape is not None else default_shape)
            else:
                expression_shapes[expression] = shapes[shape_index] if shape is not None else default_shape

    # second pass: resolve each unique (expression, value) pair to concrete
    # limits (possibly as a delayed task)
    limits_list = []
    for expression, value in expression_values.keys():
        if self.is_category(expression):
            # categories map to integer bins centered on 0..N-1
            N = self._categories[_ensure_string_from_expression(expression)]['N']
            limits = [-0.5, N-0.5]
        else:
            if isinstance(value, six.string_types):
                if value == "minmax":
                    limits = self.minmax(expression, selection=selection, delay=True)
                else:
                    # specifiers like "90%", "3s", "3sigma", "90%square"
                    match = re.match(r"([\d.]*)(\D*)", value)
                    if match is None:
                        # NOTE(review): the %r placeholder is never filled in
                        # (missing `% value`) -- confirm and fix separately
                        raise ValueError("do not understand limit specifier %r, examples are 90%, 3sigma")
                    else:
                        number, type = match.groups()
                        import ast
                        number = ast.literal_eval(number)
                        type = type.strip()
                        if type in ["s", "sigma"]:
                            limits = self.limits_sigma(number)
                        elif type in ["ss", "sigmasquare"]:
                            limits = self.limits_sigma(number, square=True)
                        elif type in ["%", "percent"]:
                            limits = self.limits_percentage(expression, number, selection=selection, delay=False)
                        elif type in ["%s", "%square", "percentsquare"]:
                            limits = self.limits_percentage(expression, number, selection=selection, square=True, delay=True)
                        # NOTE(review): an unrecognized suffix leaves `limits`
                        # bound to its previous value (or unbound on the first
                        # iteration) -- confirm whether that is intended
            elif value is None:
                limits = self.minmax(expression, selection=selection, delay=True)
            else:
                # already a concrete [min, max]
                limits = value
        limits_list.append(limits)
        if limits is None:
            raise ValueError("limit %r not understood" % value)
        expression_values[(expression, value)] = limits
    limits_list = delayed_args(*limits_list)

    @delayed
    def finish(limits_list):
        # re-walk the original (possibly nested) structure and pick up the
        # resolved limits/shapes, unwrapping any delayed tasks
        limits_outer = []
        shapes_list = []
        for expression, value in zip(initial_expressions, initial_values):
            if _issequence(expression):
                expressions = expression
                waslist2 = True
            else:
                expressions = [expression]
                waslist2 = False
            if _is_limit(value) or not _issequence(value):
                values = (value,) * len(expressions)
            else:
                values = value
            limits = []
            shapes = []
            for expression, value in zip(expressions, values):
                if not _is_limit(value):
                    value = expression_values[(expression, value)]
                    if not _is_limit(value):
                        # still a task: fetch its concrete result
                        value = value.get()
                limits.append(value)
                shapes.append(expression_shapes[expression])
            if waslist2:
                limits_outer.append(limits)
                shapes_list.append(shapes)
            else:
                limits_outer.append(limits[0])
                shapes_list.append(shapes[0])
        if shape:
            return vaex.utils.unlistify(waslist, limits_outer), vaex.utils.unlistify(waslist, shapes_list)
        else:
            return vaex.utils.unlistify(waslist, limits_outer)
    return self._delay(delay, finish(limits_list))
def mode(self, expression, binby=[], limits=None, shape=256, mode_shape=64, mode_limits=None, progressbar=False, selection=None):
    """Calculate/estimate the mode of ``expression`` per bin of ``binby``.

    The mode is estimated by histogramming ``expression`` into ``mode_shape``
    bins (over ``mode_limits``) for every ``binby`` cell and taking the center
    of the fullest bin; cells with no counts yield NaN.

    :param expression: expression for which the mode is estimated
    :param binby: list of expressions to bin over (required, must be non-empty)
    :param limits: limits for the binby expressions
    :param shape: shape per binby dimension
    :param mode_shape: number of bins used to estimate the mode
    :param mode_limits: limits for the expression itself
    :raises ValueError: when no binby expressions are given
    """
    if len(binby) == 0:
        raise ValueError("only supported with binby argument given")
    else:
        # todo, fix progressbar into two...
        try:
            len(shape)
            shape = tuple(shape)
        except TypeError:  # scalar shape: repeat it per binby dimension
            shape = len(binby) * (shape,)
        shape = (mode_shape,) + shape
        subspace = self(*(list(binby) + [expression]))
        if selection:
            subspace = subspace.selected()
        limits = self.limits(list(binby), limits)
        mode_limits = self.limits([expression], mode_limits)
        limits = list(limits) + list(mode_limits)
        counts = subspace.histogram(limits=limits, size=shape, progressbar=progressbar)
        # fullest expression-bin per binby cell
        indices = np.argmax(counts, axis=0)
        pmin, pmax = limits[-1]
        centers = np.linspace(pmin, pmax, mode_shape + 1)[:-1]  # ignore last bin
        centers += (centers[1] - centers[0]) / 2  # and move half a bin to the right
        modes = centers[indices]
        ok = counts.sum(axis=0) > 0
        modes[~ok] = np.nan  # empty cells have no mode
        return modes
@vaex.utils.deprecated('use df.widget.heatmap')
def plot_widget(self, x, y, limits=None, f="identity", **kwargs):
    """Deprecated: forwards to the heatmap widget, see ``df.widget.heatmap``."""
    heatmap = self.widget.heatmap
    return heatmap(x, y, limits=limits, transform=f, **kwargs)
@vaex.utils.deprecated('use plot_widget')
def plot_bq(self, x, y, grid=None, shape=256, limits=None, what="count(*)", figsize=None,
            f="identity", figure_key=None, fig=None, axes=None, xlabel=None, ylabel=None, title=None,
            show=True, selection=[None, True], colormap="afmhot", grid_limits=None, normalize="normalize",
            grid_before=None,
            what_kwargs={}, type="default",
            scales=None, tool_select=False, bq_cleanup=True,
            **kwargs):
    """Deprecated bqplot-based 2d plot; use :meth:`plot_widget` instead."""
    import vaex.ext.bqplot
    plot_cls = vaex.ext.bqplot.get_class(type)
    plot2d = plot_cls(
        df=self, x=x, y=y, grid=grid, shape=shape, limits=limits, what=what,
        f=f, figure_key=figure_key, fig=fig,
        selection=selection, grid_before=grid_before,
        grid_limits=grid_limits, normalize=normalize, colormap=colormap,
        what_kwargs=what_kwargs, **kwargs)
    if show:
        plot2d.show()
    return plot2d
# @_hidden
@docsubst
def healpix_count(self, expression=None, healpix_expression=None, healpix_max_level=12, healpix_level=8, binby=None, limits=None, shape=default_shape, delay=False, progress=None, selection=None):
    """Count non missing value for expression on an array which represents healpix data.

    :param expression: Expression or column for which to count non-missing values, or None or '*' for counting the rows
    :param healpix_expression: {healpix_max_level}
    :param healpix_max_level: {healpix_max_level}
    :param healpix_level: {healpix_level}
    :param binby: {binby}, these dimension follow the first healpix dimension.
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return:
    """
    import healpy as hp
    if healpix_expression is None:
        # we now assume we have gaia data when the ucd of source_id matches
        if self.ucds.get("source_id", None) == 'meta.id;meta.main':
            healpix_expression = "source_id/34359738368"
        if healpix_expression is None:
            raise ValueError("no healpix_expression given, and was unable to guess")
    level_diff = healpix_max_level - healpix_level
    nside = 2**healpix_level
    npix = hp.nside2npix(nside)
    scaling = 4**level_diff  # pixels per coarse pixel at the source level
    expr = "%s/%s" % (healpix_expression, scaling)
    bin_expressions = [expr] + ([] if binby is None else _ensure_list(binby))
    full_shape = (npix,) + _expand_shape(shape, len(bin_expressions) - 1)
    half_bin = 1. / scaling / 2  # center the integer pixel ids within their bins
    full_limits = [[-half_bin, npix - half_bin]] + ([] if limits is None else limits)
    return self.count(expression, binby=bin_expressions, limits=full_limits, shape=full_shape, delay=delay, progress=progress, selection=selection)
# @_hidden
def healpix_plot(self, healpix_expression="source_id/34359738368", healpix_max_level=12, healpix_level=8, what="count(*)", selection=None,
                 grid=None,
                 healpix_input="equatorial", healpix_output="galactic", f=None,
                 colormap="afmhot", grid_limits=None, image_size=800, nest=True,
                 figsize=None, interactive=False, title="", smooth=None, show=False, colorbar=True,
                 rotation=(0, 0, 0), **kwargs):
    """Viz data in 2d using a healpix column.

    :param healpix_expression: {healpix_max_level}
    :param healpix_max_level: {healpix_max_level}
    :param healpix_level: {healpix_level}
    :param what: {what}
    :param selection: {selection}
    :param grid: {grid}
    :param healpix_input: Specificy if the healpix index is in "equatorial", "galactic" or "ecliptic".
    :param healpix_output: Plot in "equatorial", "galactic" or "ecliptic".
    :param f: function to apply to the data
    :param colormap: matplotlib colormap
    :param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid)))
    :param image_size: size for the image that healpy uses for rendering
    :param nest: If the healpix data is in nested (True) or ring (False)
    :param figsize: If given, modify the matplotlib figure size. Example (14,9)
    :param interactive: (Experimental, uses healpy.mollzoom is True)
    :param title: Title of figure
    :param smooth: apply gaussian smoothing, in degrees
    :param show: Call matplotlib's show (True) or not (False, defaut)
    :param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees.
    :return:
    """
    # plot_level = healpix_level #healpix_max_level-reduce_level
    import healpy as hp
    import pylab as plt
    if grid is None:
        # no precomputed grid: aggregate `what` onto the coarser healpix level
        reduce_level = healpix_max_level - healpix_level
        NSIDE = 2**healpix_level
        nmax = hp.nside2npix(NSIDE)
        # print nmax, np.sqrt(nmax)
        scaling = 4**reduce_level
        # print nmax
        epsilon = 1. / scaling / 2
        grid = self._stat(what=what, binby="%s/%s" % (healpix_expression, scaling), limits=[-epsilon, nmax - epsilon], shape=nmax, selection=selection)
    if grid_limits:
        grid_min, grid_max = grid_limits
    else:
        grid_min = grid_max = None  # let healpy autoscale the colormap
    f_org = f
    f = _parse_f(f)
    if smooth:
        if nest:
            # healpy smoothing requires RING ordering
            grid = hp.reorder(grid, inp="NEST", out="RING")
            nest = False
        # grid[np.isnan(grid)] = np.nanmean(grid)
        grid = hp.smoothing(grid, sigma=np.radians(smooth))
    fgrid = f(grid)
    coord_map = dict(equatorial='C', galactic='G', ecliptic="E")
    fig = plt.gcf()
    if figsize is not None:
        fig.set_size_inches(*figsize)
    what_label = what
    if f_org:
        what_label = f_org + " " + what_label
    # interactive mode uses the zoomable mollweide viewer
    f = hp.mollzoom if interactive else hp.mollview
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        coord = coord_map[healpix_input], coord_map[healpix_output]
        if coord_map[healpix_input] == coord_map[healpix_output]:
            coord = None  # no coordinate transform needed
        f(fgrid, unit=what_label, rot=rotation, nest=nest, title=title, coord=coord,
          cmap=colormap, hold=True, xsize=image_size, min=grid_min, max=grid_max, cbar=colorbar, **kwargs)
    if show:
        plt.show()
@docsubst
@stat_1d
def _stat(self, what="count(*)", what_kwargs={}, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Compute one or more statistics of the form ``func(args)`` (e.g.
    ``count(*)``, ``mean(x)``) binned by ``binby``, returning one grid per
    what/selection combination.

    :param what: statistic specifier string, or list of them
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    """
    waslist_what, [whats, ] = vaex.utils.listify(what)
    limits = self.limits(binby, limits, delay=True)
    waslist_selection, [selections] = vaex.utils.listify(selection)
    binby = _ensure_list(binby)

    shape = _expand_shape(shape, len(binby))
    # one sub-grid per requested statistic and per selection
    total_grid = np.zeros((len(whats), len(selections)) + shape, dtype=float)

    @delayed
    def get_whats(limits):
        grids = []
        for j, what in enumerate(whats):
            what = what.strip()
            # raises ValueError early when the specifier has no "(...)" part
            index = what.index("(")
            groups = re.match(r"(.*)\((.*)\)", what).groups()
            if groups and len(groups) == 2:
                function = groups[0]
                arguments = groups[1].strip()
                if "," in arguments:
                    arguments = arguments.split(",")
                functions = ["mean", "sum", "std", "var", "correlation", "covar", "min", "max"]
                # NOTE(review): the unit lookup below has no observable effect
                # (what_units is never used); kept for its potential validation
                # side effect -- confirm before removing
                unit_expression = None
                if function in ["mean", "sum", "std", "min", "max"]:
                    unit_expression = arguments
                if function in ["var"]:
                    unit_expression = "(%s) * (%s)" % (arguments, arguments)
                if function in ["covar"]:
                    unit_expression = "(%s) * (%s)" % arguments
                if unit_expression:
                    unit = self.unit(unit_expression)
                    if unit:
                        what_units = unit.to_string('latex_inline')
                if function in functions:
                    grid = getattr(self, function)(arguments, binby=binby, limits=limits, shape=shape,
                                                   selection=selections, progress=progress, delay=delay)
                elif function == "count":
                    grid = self.count(arguments, binby, shape=shape, limits=limits, selection=selections,
                                      progress=progress, delay=delay)
                else:
                    raise ValueError("Could not understand method: %s, expected one of %r'" % (function, functions))
                grids.append(grid)
        return grids
    grids = get_whats(limits)

    @delayed
    def finish(grids):
        # copy each statistic's grid into the shared output array
        for i, grid in enumerate(grids):
            total_grid[i] = grid
        # collapse the what/selection axes when scalars were passed in
        return total_grid[slice(None, None, None) if waslist_what else 0, slice(None, None, None) if waslist_selection else 0]
    s = finish(delayed_list(grids))
    return self._delay(delay, s)
# Visualization entry points, bound lazily: _requires('viz') resolves the
# implementation from the optional vaex 'viz' add-on at call time.
plot = _requires('viz')
plot1d = _requires('viz')
scatter = _requires('viz')
def plot3d(self, x, y, z, vx=None, vy=None, vz=None, vwhat=None, limits=None, grid=None, what="count(*)", shape=128, selection=[None, True], f=None,
           vcount_limits=None,
           smooth_pre=None, smooth_post=None, grid_limits=None, normalize="normalize", colormap="afmhot",
           figure_key=None, fig=None,
           lighting=True, level=[0.1, 0.5, 0.9], opacity=[0.01, 0.05, 0.1], level_width=0.1,
           show=True, **kwargs):
    """Use at own risk, requires ipyvolume"""
    import vaex.ext.ipyvolume
    plot_cls = vaex.ext.ipyvolume.PlotDefault
    volume_plot = plot_cls(df=self, x=x, y=y, z=z, vx=vx, vy=vy, vz=vz,
                           grid=grid, shape=shape, limits=limits, what=what,
                           f=f, figure_key=figure_key, fig=fig,
                           selection=selection, smooth_pre=smooth_pre, smooth_post=smooth_post,
                           grid_limits=grid_limits, vcount_limits=vcount_limits, normalize=normalize, colormap=colormap, **kwargs)
    if show:
        volume_plot.show()
    return volume_plot
@property
def col(self):
    """Gives direct access to the columns only (useful for tab completion).

    Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion.

    Columns can be accessed by their names, which are attributes. The attributes are currently expressions, so you can
    do computations with them.

    Example

    >>> ds = vaex.example()
    >>> df.plot(df.col.x, df.col.y)
    """
    class ColumnList(object):
        pass
    holder = ColumnList()
    for name in self.get_column_names():
        # reuse an existing Expression attribute when present, else make one
        expr = getattr(self, name, None)
        if not isinstance(expr, Expression):
            expr = Expression(self, name)
        setattr(holder, name, expr)
    return holder
def close(self):
    """Close any possible open file handles or other resources, the DataFrame will not be in a usable state afterwards."""
    dataset = self.dataset
    dataset.close()
def byte_size(self, selection=False, virtual=False):
    """Return the size in bytes the whole DataFrame requires (or the selection), respecting the active_fraction.

    :param selection: count only rows in the (named) selection when truthy
    :param virtual: include virtual columns when True
    """
    bytes_per_row = 0
    N = self.count(selection=selection)  # row count, computed once and reused below
    extra = 0
    for column in list(self.get_column_names(virtual=virtual)):
        dtype = self.data_type(column)
        dtype_internal = self.data_type(column, internal=True)
        #if dtype in [str_type, str] and dtype_internal.kind == 'O':
        if self.is_string(column):
            # TODO: document or fix this
            # is it too expensive to calculate this exactly?
            extra += self.columns[column].nbytes
        else:
            bytes_per_row += dtype_internal.itemsize
            if np.ma.isMaskedArray(self.columns[column]):
                bytes_per_row += 1  # one mask byte per row
    return bytes_per_row * N + extra
@property
def nbytes(self):
    """Alias for `df.byte_size()`, see :meth:`DataFrame.byte_size`."""
    total = self.byte_size()
    return total
def _shape_of(self, expression, filtered=True):
    """Return the full shape of ``expression``: (row count,) plus any trailing
    per-row dimensions, determined from a single evaluated sample row."""
    # TODO: we don't seem to need it anymore, would expect a valid_expression() call
    sample = self.evaluate(expression, 0, 1, filtered=False, array_type="numpy", parallel=False)
    sample = vaex.array_types.to_numpy(sample, strict=True)
    if filtered:
        row_count = len(self)
    else:
        row_count = self.length_unfiltered()
    return (row_count,) + sample.shape[1:]
def data_type(self, expression, array_type=None, internal=False):
    """Return the datatype for the given expression, if not a column, the first row will be evaluated to get the data type.

    Example:

    >>> df = vaex.from_scalars(x=1, s='Hi')

    :param str array_type: 'numpy', 'arrow' or None, to indicate if the data type should be converted
    :param bool internal: when False, numpy string/unicode dtypes are reported as arrow strings
    """
    expression = _ensure_string_from_expression(expression)
    data_type = None
    expression = vaex.utils.valid_expression(self.get_column_names(hidden=True), expression)
    if expression in self.variables:
        # variables are treated as float64 scalars
        data_type = np.float64(1).dtype
    elif self.is_local() and expression in self.columns.keys():
        column = self.columns[expression]
        if hasattr(column, 'dtype'):
            # TODO: this probably would use data_type
            # to support Columns that wrap arrow arrays
            data_type = column.dtype
        else:
            # no dtype attribute: sample one element and inspect below
            data = column[0:1]
    else:
        # not a plain column: evaluate a single row to discover the type
        try:
            data = self.evaluate(expression, 0, 1, filtered=False, array_type=array_type, parallel=False)
        except:
            data = self.evaluate(expression, 0, 1, filtered=True, array_type=array_type, parallel=False)
    if data_type is None:
        # means we have to determine it from the data
        if isinstance(data, np.ndarray):
            data_type = data.dtype
        elif isinstance(data, Column):
            data = data.to_arrow()
            data_type = data.type
        else:
            data_type = data.type  # assuming arrow
        if array_type == "arrow":
            data_type = array_types.to_arrow_type(data_type)
            # NOTE(review): this early return skips the `internal` handling
            # below, unlike the "numpy" branch -- confirm this asymmetry is
            # intended
            return data_type
        elif array_type == "numpy":
            data_type = array_types.to_numpy_type(data_type)
        elif array_type is None:
            pass
        else:
            raise ValueError(f'Unknown array_type {array_type}')
    if not internal:
        # externally, numpy string/unicode columns are presented as arrow strings
        if isinstance(data_type, np.dtype) and data_type.kind in 'US':
            return pa.string()
    return data_type
@property
def dtypes(self):
    """Gives a Pandas series object containing all numpy dtypes of all columns (except hidden)."""
    from pandas import Series
    names = self.get_column_names()
    return Series({name: self.data_type(name) for name in names})
def is_masked(self, column):
    '''Return if a column is a masked (numpy.ma) column.'''
    column = _ensure_string_from_expression(column)
    if column not in self.columns:
        # not a real column: evaluate one row and inspect the result
        ar = self.evaluate(column, i1=0, i2=1, parallel=False)
        if isinstance(ar, np.ndarray) and np.ma.isMaskedArray(ar):
            return True
        return False
    data = self.columns[column]
    if isinstance(data, np.ndarray):
        return np.ma.isMaskedArray(data)
    # in case the column is not a numpy array, we take a small slice
    # which should return a numpy array
    return np.ma.isMaskedArray(data[0:1])
def label(self, expression, unit=None, output_unit=None, format="latex_inline"):
    """Return an axis label for ``expression``, annotated with its unit (latex
    inline form) when one is known."""
    text = expression
    unit = unit or self.unit(expression)
    try:
        # if we can convert the unit, prefer the requested output unit
        if output_unit and unit:  # avoid unnecessary error msg'es
            output_unit.to(unit)
            unit = output_unit
    except:
        logger.exception("unit error")
    if unit is None:
        return text
    return "%s (%s)" % (text, unit.to_string('latex_inline'))
def unit(self, expression, default=None):
    """Returns the unit (an astropy.unit.Units object) for the expression.

    Example

    >>> import vaex
    >>> ds = vaex.example()
    >>> df.unit("x")
    Unit("kpc")
    >>> df.unit("x*L")
    Unit("km kpc2 / s")

    :param expression: Expression, which can be a column name
    :param default: if no unit is known, it will return this
    :return: The resulting unit of the expression
    :rtype: astropy.units.Unit
    """
    expression = _ensure_string_from_expression(expression)
    try:
        # if an expression like pi * <some_expr> it will evaluate to a quantity instead of a unit
        unit_or_quantity = eval(expression, expression_namespace, scopes.UnitScope(self))
        unit = unit_or_quantity.unit if hasattr(unit_or_quantity, "unit") else unit_or_quantity
        return unit if isinstance(unit, astropy.units.Unit) else None
    except:
        # broad except is deliberate: any failure falls through to the retry below
        # logger.exception("error evaluating unit expression: %s", expression)
        # astropy doesn't add units, so we try with a quatiti
        try:
            # retry with a scalar value of 1. so the result is a quantity carrying a unit
            return eval(expression, expression_namespace, scopes.UnitScope(self, 1.)).unit
        except:
            # logger.exception("error evaluating unit expression: %s", expression)
            return default
def ucd_find(self, ucds, exclude=[]):
    """Find a set of columns (names) which have the ucd, or part of the ucd.

    Prefixed with a ^, it will only match the first part of the ucd.

    Example

    >>> df.ucd_find('pos.eq.ra', 'pos.eq.dec')
    ['RA', 'DEC']
    >>> df.ucd_find('pos.eq.ra', 'doesnotexist')
    >>> df.ucds[df.ucd_find('pos.eq.ra')]
    'pos.eq.ra;meta.main'
    >>> df.ucd_find('meta.main')
    'dec'
    >>> df.ucd_find('^meta.main')
    """
    if isinstance(ucds, six.string_types):
        ucds = [ucds]
    if len(ucds) != 1:
        # resolve each ucd independently; any miss makes the whole lookup fail
        found = [self.ucd_find([ucd], exclude=exclude) for ucd in ucds]
        return None if None in found else found
    ucd = ucds[0]
    if ucd[0] == "^":  # we want it to start with
        prefix = ucd[1:]
        matches = [name for name in self.get_column_names()
                   if self.ucds.get(name, "").startswith(prefix) and name not in exclude]
    else:
        matches = [name for name in self.get_column_names()
                   if ucd in self.ucds.get(name, "") and name not in exclude]
    return matches[0] if matches else None
@vaex.utils.deprecated('Will most likely disappear or move')
@_hidden
def selection_favorite_add(self, name, selection_name="default"):
    """Store the currently active selection under ``name`` and persist the favorites."""
    selection = self.get_selection(name=selection_name)
    if not selection:
        raise ValueError("no selection exists")
    self.favorite_selections[name] = selection
    self.selections_favorite_store()
@vaex.utils.deprecated('Will most likely disappear or move')
@_hidden
def selection_favorite_remove(self, name):
    """Remove the stored favorite selection ``name`` and persist the change."""
    self.favorite_selections.pop(name)
    self.selections_favorite_store()
@vaex.utils.deprecated('Will most likely disappear or move')
@_hidden
def selection_favorite_apply(self, name, selection_name="default", executor=None):
    """Activate the stored favorite selection ``name`` as ``selection_name``."""
    favorite = self.favorite_selections[name]
    self.set_selection(favorite, name=selection_name, executor=executor)
@vaex.utils.deprecated('Will most likely disappear or move')
@_hidden
def selections_favorite_store(self):
    """Persist all favorite selections to the DataFrame's private directory."""
    path = os.path.join(self.get_private_dir(create=True), "favorite_selection.yaml")
    serialized = collections.OrderedDict()
    for key, value in self.favorite_selections.items():
        serialized[key] = value.to_dict()
    vaex.utils.write_json_or_yaml(path, serialized)
@vaex.utils.deprecated('Will most likely disappear or move')
@_hidden
def selections_favorite_load(self):
    """Load previously persisted favorite selections, best-effort.

    Failures (missing/corrupt file, unreadable entries) are logged and
    otherwise ignored so loading never breaks DataFrame construction.
    """
    try:
        path = os.path.join(self.get_private_dir(create=True), "favorite_selection.yaml")
        if os.path.exists(path):
            selections_dict = vaex.utils.read_json_or_yaml(path)
            for key, value in selections_dict.items():
                self.favorite_selections[key] = selections.selection_from_dict(self, value)
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
        logger.exception("non fatal error")
def get_private_dir(self, create=False):
    """Each DataFrame has a directory where files are stored for metadata etc.

    Example

    >>> import vaex
    >>> ds = vaex.example()
    >>> vaex.get_private_dir()
    '/Users/users/breddels/.vaex/dfs/_Users_users_breddels_vaex-testing_data_helmi-dezeeuw-2000-10p.hdf5'

    :param bool create: is True, it will create the directory if it does not exist
    """
    if self.is_local():
        name = os.path.abspath(self.path).replace(os.path.sep, "_")[:250]  # should not be too long for most os'es
        name = name.replace(":", "_")  # for windows drive names
    else:
        server = self.server
        name = "%s_%s_%s_%s" % (server.hostname, server.port, server.base_path.replace("/", "_"), self.name)
    private_dir = os.path.join(vaex.utils.get_private_dir(), "dfs", name)
    if create:
        # exist_ok avoids the exists()/makedirs() TOCTOU race of the old code
        os.makedirs(private_dir, exist_ok=True)
    return private_dir
def state_get(self):
    """Return the internal state of the DataFrame in a dictionary

    Example:

    >>> import vaex
    >>> df = vaex.from_scalars(x=1, y=2)
    >>> df['r'] = (df.x**2 + df.y**2)**0.5
    >>> df.state_get()
    {'active_range': [0, 1],
    'column_names': ['x', 'y', 'r'],
    'description': None,
    'descriptions': {},
    'functions': {},
    'renamed_columns': [],
    'selections': {'__filter__': None},
    'ucds': {},
    'units': {},
    'variables': {},
    'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}
    """
    virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())
    units = {key: str(value) for key, value in self.units.items()}
    # only keep metadata that belongs to virtual columns/variables
    ucds = {key: value for key, value in self.ucds.items() if key in virtual_names}
    descriptions = {key: value for key, value in self.descriptions.items()}
    import vaex.serialize

    def check(key, value):
        # warn (and drop) functions that cannot round-trip through serialization
        if not vaex.serialize.can_serialize(value.f):
            warnings.warn('Cannot serialize function for virtual column {} (use vaex.serialize.register)'.format(key))
            return False
        return True

    def clean(value):
        return vaex.serialize.to_dict(value.f)
    functions = {key: clean(value) for key, value in self.functions.items() if check(key, value)}
    virtual_columns = {key: value for key, value in self.virtual_columns.items()}
    selections = {name: self.get_selection(name) for name, history in self.selection_histories.items()}
    selections = {name: selection.to_dict() if selection is not None else None for name, selection in selections.items()}
    # if selection is not None}
    state = dict(virtual_columns=virtual_columns,
                 column_names=self.column_names,
                 renamed_columns=self._renamed_columns,
                 variables=self.variables,
                 functions=functions,
                 selections=selections,
                 ucds=ucds,
                 units=units,
                 descriptions=descriptions,
                 description=self.description,
                 active_range=[self._index_start, self._index_end])
    return state
def state_set(self, state, use_active_range=False, trusted=True):
    """Sets the internal state of the df

    Example:

    >>> import vaex
    >>> df = vaex.from_scalars(x=1, y=2)
    >>> df
      #    x    y        r
      0    1    2  2.23607
    >>> df['r'] = (df.x**2 + df.y**2)**0.5
    >>> state = df.state_get()
    >>> state
    {'active_range': [0, 1],
    'column_names': ['x', 'y', 'r'],
    'description': None,
    'descriptions': {},
    'functions': {},
    'renamed_columns': [],
    'selections': {'__filter__': None},
    'ucds': {},
    'units': {},
    'variables': {},
    'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}
    >>> df2 = vaex.from_scalars(x=3, y=4)
    >>> df2.state_set(state)  # now the virtual functions are 'copied'
    >>> df2
      #    x    y    r
      0    3    4    5

    :param state: dict as returned by :meth:`DataFrame.state_get`.
    :param bool use_active_range: Whether to use the active range or not.
    :param bool trusted: passed through to function deserialization; only
        set False for states from untrusted sources
    """
    self.description = state['description']
    if use_active_range:
        self._index_start, self._index_end = state['active_range']
    self._length_unfiltered = self._index_end - self._index_start
    if 'renamed_columns' in state:
        # replay renames before anything references the new names
        for old, new in state['renamed_columns']:
            self._rename(old, new)
    for name, value in state['functions'].items():
        self.add_function(name, vaex.serialize.from_dict(value, trusted=trusted))
    if 'column_names' in state:
        # we clear all columns, and add them later on, since otherwise self[name] = ... will try
        # to rename the columns (which is unsupported for remote dfs)
        self.column_names = []
        self.virtual_columns = {}
        for name, value in state['virtual_columns'].items():
            self[name] = self._expr(value)
            # self._save_assign_expression(name)
        self.column_names = list(state['column_names'])
        for name in self.column_names:
            self._save_assign_expression(name)
    else:
        # old behaviour
        self.virtual_columns = {}
        for name, value in state['virtual_columns'].items():
            self[name] = self._expr(value)
    self.variables = state['variables']
    import astropy  # TODO: make this dep optional?
    units = {key: astropy.units.Unit(value) for key, value in state["units"].items()}
    self.units.update(units)
    for name, selection_dict in state['selections'].items():
        # TODO: make selection use the vaex.serialize framework
        if selection_dict is None:
            selection = None
        else:
            selection = selections.selection_from_dict(selection_dict)
        self.set_selection(selection, name=name)
def state_write(self, f):
    """Write the internal state to a json or yaml file (see :meth:`DataFrame.state_get`).

    The output format follows the file extension::

        df.state_write('state.json')   # JSON
        df.state_write('state.yaml')   # YAML

    The stored state contains the virtual columns, column names, variables,
    functions, selections, units/ucds/descriptions and the active range,
    exactly as produced by :meth:`DataFrame.state_get`.  It can be restored
    with :meth:`DataFrame.state_load`.

    :param str f: filename (ending in .json or .yaml)
    """
    current_state = self.state_get()
    vaex.utils.write_json_or_yaml(f, current_state)
def state_load(self, f, use_active_range=False):
    """Load a state previously stored by :meth:`DataFrame.state_write`, see also :meth:`DataFrame.state_set`."""
    loaded_state = vaex.utils.read_json_or_yaml(f)
    self.state_set(loaded_state, use_active_range=use_active_range)
def remove_virtual_meta(self):
    """Removes the file with the virtual column etc, it does not change the current virtual columns etc.

    Best effort: failures are logged, never raised.
    """
    # renamed from 'dir' to avoid shadowing the builtin
    meta_dir = self.get_private_dir(create=True)
    path = os.path.join(meta_dir, "virtual_meta.yaml")
    try:
        if os.path.exists(path):
            os.remove(path)
        # also remove the private dir when it is now empty
        if not os.listdir(meta_dir):
            os.rmdir(meta_dir)
    except Exception:
        # was a bare 'except:', which would also swallow KeyboardInterrupt/SystemExit
        logger.exception("error while trying to remove %s or %s", path, meta_dir)
# def remove_meta(self):
# path = os.path.join(self.get_private_dir(create=True), "meta.yaml")
# os.remove(path)
@_hidden
def write_virtual_meta(self):
    """Writes virtual columns, variables and their ucd, description and units.

    The default implementation stores this in a file called ``virtual_meta.yaml``
    in the directory given by :func:`DataFrame.get_private_dir`; other
    implementations may store it in the DataFrame file itself.  It is called
    after virtual columns or variables are added; on opening a file
    :func:`DataFrame.update_virtual_meta` reads it back so the information
    survives between sessions.

    Note: opening a DataFrame twice may result in corruption of this file.
    """
    path = os.path.join(self.get_private_dir(create=True), "virtual_meta.yaml")
    virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())
    # only persist metadata that belongs to virtual columns/variables
    units = {name: str(unit) for name, unit in self.units.items() if name in virtual_names}
    ucds = {name: ucd for name, ucd in self.ucds.items() if name in virtual_names}
    descriptions = {name: descr for name, descr in self.descriptions.items() if name in virtual_names}
    meta_info = dict(
        virtual_columns=self.virtual_columns,
        variables=self.variables,
        ucds=ucds,
        units=units,
        descriptions=descriptions,
    )
    vaex.utils.write_json_or_yaml(path, meta_info)
@_hidden
def update_virtual_meta(self):
    """Will read back the virtual column etc, written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame.

    Best effort: any failure is logged and ignored, so a missing or corrupt
    metadata file does not prevent opening the DataFrame.
    """
    import astropy.units
    try:
        path = os.path.join(self.get_private_dir(create=False), "virtual_meta.yaml")
        if os.path.exists(path):
            meta_info = vaex.utils.read_json_or_yaml(path)
            if 'virtual_columns' not in meta_info:
                return
            self.virtual_columns.update(meta_info["virtual_columns"])
            self.variables.update(meta_info["variables"])
            self.ucds.update(meta_info["ucds"])
            self.descriptions.update(meta_info["descriptions"])
            units = {key: astropy.units.Unit(value) for key, value in meta_info["units"].items()}
            self.units.update(units)
    except Exception:
        # was a bare 'except:', which would also swallow KeyboardInterrupt/SystemExit
        logger.exception("non fatal error")
@_hidden
def write_meta(self):
    """Writes all meta data: ucd, description and units.

    The default implementation writes a file called ``meta.yaml`` in the
    directory given by :func:`DataFrame.get_private_dir`; other
    implementations may store this in the DataFrame file itself (the vaex
    hdf5 implementation does, for instance).  It is called after virtual
    columns or variables are added; on opening a file
    :func:`DataFrame.update_meta` reads it back so the information survives
    between sessions.

    Note: opening a DataFrame twice may result in corruption of this file.
    """
    # raise NotImplementedError
    path = os.path.join(self.get_private_dir(create=True), "meta.yaml")
    units = {name: str(unit) for name, unit in self.units.items()}
    meta_info = dict(
        description=self.description,
        ucds=self.ucds,
        units=units,
        descriptions=self.descriptions,
    )
    vaex.utils.write_json_or_yaml(path, meta_info)
@_hidden
def update_meta(self):
    """Will read back the ucd, descriptions, units etc, written by :func:`DataFrame.write_meta`. This will be done when opening a DataFrame.

    Best effort: any failure is logged and ignored.
    """
    import astropy.units
    path = None  # bug fix: keep 'path' bound even if get_private_dir raises, so the except clause cannot NameError
    try:
        path = os.path.join(self.get_private_dir(create=False), "meta.yaml")
        if os.path.exists(path):
            meta_info = vaex.utils.read_json_or_yaml(path)
            self.description = meta_info["description"]
            self.ucds.update(meta_info["ucds"])
            self.descriptions.update(meta_info["descriptions"])
            # self.virtual_columns.update(meta_info["virtual_columns"])
            # self.variables.update(meta_info["variables"])
            units = {key: astropy.units.Unit(value) for key, value in meta_info["units"].items()}
            self.units.update(units)
    except Exception:
        # was a bare 'except:', which would also swallow KeyboardInterrupt/SystemExit
        logger.exception("non fatal error, but could read/understand %s", path)
def is_local(self):
    """Returns True if the DataFrame is local, False when a DataFrame is remote."""
    # abstract: concrete (local/remote) subclasses must override this
    raise NotImplementedError
def get_auto_fraction(self):
    """Return the auto-fraction flag (set via :meth:`set_auto_fraction`)."""
    return self._auto_fraction
def set_auto_fraction(self, enabled):
    """Enable or disable the auto-fraction mode (see :meth:`get_auto_fraction`)."""
    self._auto_fraction = enabled
@classmethod
def can_open(cls, path, *args, **kwargs):
    # """Tests if this class can open the file given by path"""
    # base implementation: subclasses that can open files override this
    return False
@classmethod
def get_options(cls, path):
    """Return the list of extra open options for *path*; base implementation has none."""
    return []
@classmethod
def option_to_args(cls, option):
    """Translate an open option into argument form; base implementation has none."""
    return []
def combinations(self, expressions_list=None, dimension=2, exclude=None, **kwargs):
    """Generate a list of combinations for the possible expressions for the given dimension.

    :param expressions_list: list of list of expressions, where the inner list defines the subspace
    :param dimension: if not None, generates all combinations of the column names for that
        dimension (note: this replaces any given expressions_list)
    :param exclude: a string, a sequence of strings (or of sequences of strings), or a callable;
        combinations that match it are filtered out
    """
    if dimension is not None:
        expressions_list = list(itertools.combinations(self.get_column_names(), dimension))
        if exclude is not None:
            import six

            def excluded(expressions):
                # returns True when this combination should be dropped
                if callable(exclude):
                    return exclude(expressions)
                elif isinstance(exclude, six.string_types):
                    return exclude in expressions
                elif isinstance(exclude, (list, tuple)):
                    for e in exclude:
                        if isinstance(e, six.string_types):
                            if e in expressions:
                                return True
                        elif isinstance(e, (list, tuple)):
                            # a sequence excludes every combination it is a subset of
                            if set(e).issubset(expressions):
                                return True
                        else:
                            raise ValueError("elements of exclude should contain a string or a sequence of strings")
                else:
                    raise ValueError("exclude should contain a string, a sequence of strings, or should be a callable")
                return False
            # test if any of the elements of exclude are a subset of the expression
            expressions_list = [expr for expr in expressions_list if not excluded(expr)]
    logger.debug("expression list generated: %r", expressions_list)
    return expressions_list
def set_variable(self, name, expression_or_value, write=True):
    """Set the variable to an expression or value defined by expression_or_value.

    Example

    >>> df.set_variable("a", 2.)
    >>> df.set_variable("b", "a**2")
    >>> df.get_variable("b")
    'a**2'
    >>> df.evaluate_variable("b")
    4.0

    :param name: Name of the variable
    :param write: write variable to meta file (currently a no-op, see the commented-out call below)
    :param expression_or_value: value or expression
    """
    self.variables[name] = expression_or_value
    # if write:
    #     self.write_virtual_meta()
def get_variable(self, name):
    """Returns the variable given by name, it will not evaluate it.

    For evaluation, see :func:`DataFrame.evaluate_variable`, see also :func:`DataFrame.set_variable`

    :raises KeyError: when no variable with this name exists
    """
    return self.variables[name]
def evaluate_variable(self, name):
    """Evaluates the variable given by name.

    A string-valued variable is treated as an expression and evaluated with
    ``eval`` (variables are assumed to come from a trusted source); any other
    value is returned as-is.
    """
    if isinstance(self.variables[name], six.string_types):
        # TODO: this does not allow more than one level deep variable, like a depends on b, b on c, c is a const
        value = eval(self.variables[name], expression_namespace, self.variables)
        return value
    else:
        return self.variables[name]
def _evaluate_selection_mask(self, name="default", i1=None, i2=None, selection=None, cache=False, filter_mask=None):
    """Internal use, ignores the filter"""
    # NOTE(review): 'or' treats an explicit 0 like None — harmless here since the fallback is 0
    i1 = i1 or 0
    i2 = i2 or len(self)
    scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache, filter_mask=filter_mask)
    mask = scope.evaluate(name)
    # TODO: can we do without arrow->numpy conversion?
    mask = vaex.array_types.to_numpy(mask)
    return vaex.utils.unmask_selection_mask(mask)
def evaluate_selection_mask(self, name="default", i1=None, i2=None, selection=None, cache=False, filtered=True, pre_filtered=True):
    """Evaluate the boolean mask for selection *name* over rows [i1, i2), combined with the df filter when present.

    :param name: selection name (or an Expression); None/False yields just the filter mask
    :param filtered: when True, take the df filter into account
    :param pre_filtered: when True, evaluate the selection only on rows passing the filter
    """
    i1 = i1 or 0
    i2 = i2 or self.length_unfiltered()
    if isinstance(name, vaex.expression.Expression):
        # make sure if we get passed an expression, it is converted to a string
        # otherwise the name != <sth> will evaluate to an Expression object
        name = str(name)
    if name in [None, False] and self.filtered and filtered:
        # no selection asked for: the mask is just the filter
        scope_global = scopes._BlockScopeSelection(self, i1, i2, None, cache=cache)
        mask_global = scope_global.evaluate(FILTER_SELECTION_NAME)
        return vaex.utils.unmask_selection_mask(mask_global)
    elif self.filtered and filtered and name != FILTER_SELECTION_NAME:
        # combine the named selection with the filter
        scope_global = scopes._BlockScopeSelection(self, i1, i2, None, cache=cache)
        mask_global = scope_global.evaluate(FILTER_SELECTION_NAME)
        if pre_filtered:
            scope = scopes._BlockScopeSelection(self, i1, i2, selection, filter_mask=vaex.utils.unmask_selection_mask(mask_global))
            mask = scope.evaluate(name)
            return vaex.utils.unmask_selection_mask(mask)
        else:  # only used in legacy.py?
            scope = scopes._BlockScopeSelection(self, i1, i2, selection)
            mask = scope.evaluate(name)
            return vaex.utils.unmask_selection_mask(mask & mask_global)
    else:
        if name in [None, False]:
            # no selection and no (applicable) filter: every row passes
            return np.full(i2-i1, True)
        scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache)
        return vaex.utils.unmask_selection_mask(scope.evaluate(name))
    # if _is_string(selection):
def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, filtered=True, array_type=None, parallel=True, chunk_size=None):
    """Evaluate an expression, and return a numpy array with the results for the full column or a part of it.

    Note that this is not how vaex should be used, since it means a copy of the data needs to fit in memory.

    To get partial results, use i1 and i2.

    :param str expression: Name/expression to evaluate
    :param int i1: Start row index, default is the start (0)
    :param int i2: End row index, default is the length of the DataFrame
    :param ndarray out: Output array, to which the result may be written (may be used to reuse an array, or write to
                        a memory mapped array)
    :param selection: selection to apply
    :return:
    """
    if chunk_size is None:
        # single-shot evaluation of the whole requested range
        return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size)
    # chunked evaluation: delegate to the generator variant
    return self.evaluate_iterator(expression, s1=i1, s2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, chunk_size=chunk_size)
def evaluate_iterator(self, expression, s1=None, s2=None, out=None, selection=None, filtered=True, array_type=None, parallel=True, chunk_size=None, prefetch=True):
    """Generator to efficiently evaluate expressions in chunks (number of rows).

    See :func:`DataFrame.evaluate` for other arguments.

    Example:

    >>> import vaex
    >>> df = vaex.example()
    >>> for i1, i2, chunk in df.evaluate_iterator(df.x, chunk_size=100_000):
    ...     print(f"Total of {i1} to {i2} = {chunk.sum()}")
    ...
    Total of 0 to 100000 = -7460.610158279056
    Total of 100000 to 200000 = -4964.85827154921
    Total of 200000 to 300000 = -7303.271340043915
    Total of 300000 to 330000 = -2424.65234724951

    :param prefetch: Prefetch/compute the next chunk in parallel while the current value is yielded/returned.
    """
    offset = 0  # NOTE(review): appears unused — presumably leftover; confirm before removing
    import concurrent.futures
    if not prefetch:
        # this is the simple implementation
        for l1, l2, i1, i2 in self._unfiltered_chunk_slices(chunk_size):
            yield l1, l2, self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, raw=True)
    # But this implementation is faster if the main thread work is single threaded
    else:
        # single worker thread computes chunk N+1 while the consumer processes chunk N
        with concurrent.futures.ThreadPoolExecutor(1) as executor:
            iter = self._unfiltered_chunk_slices(chunk_size)
            def f(i1, i2):
                return self._evaluate_implementation(expression, i1=i1, i2=i2, out=out, selection=selection, filtered=filtered, array_type=array_type, parallel=parallel, raw=True)
            previous_l1, previous_l2, previous_i1, previous_i2 = next(iter)
            # we submit the 1st job
            previous = executor.submit(f, previous_i1, previous_i2)
            for l1, l2, i1, i2 in iter:
                # and we submit the next job before returning the previous, so they run in parallel
                # but make sure the previous is done
                previous_chunk = previous.result()
                current = executor.submit(f, i1, i2)
                yield previous_l1, previous_l2, previous_chunk
                previous = current
                previous_l1, previous_l2 = l1, l2
            # drain the last outstanding chunk
            previous_chunk = previous.result()
            yield previous_l1, previous_l2, previous_chunk
@docsubst
def to_items(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None, array_type=None):
    """Return a list of [(column_name, ndarray), ...)] pairs where the ndarray corresponds to the evaluated data

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param parallel: {evaluate_parallel}
    :param chunk_size: {chunk_size}
    :param array_type: {array_type}
    :return: list of (name, ndarray) pairs or iterator of
    """
    if not column_names:
        column_names = self.get_column_names(strings=strings, virtual=virtual)

    def _convert(chunks):
        return [array_types.convert(chunk, array_type) for chunk in chunks]

    if chunk_size is None:
        evaluated = self.evaluate(column_names, selection=selection, parallel=parallel)
        return list(zip(column_names, _convert(evaluated)))

    def iterator():
        for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):
            yield i1, i2, list(zip(column_names, _convert(chunks)))
    return iterator()
@docsubst
def to_arrays(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None, array_type=None):
    """Return a list of ndarrays

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param parallel: {evaluate_parallel}
    :param chunk_size: {chunk_size}
    :param array_type: {array_type}
    :return: list of arrays
    """
    if not column_names:
        column_names = self.get_column_names(strings=strings, virtual=virtual)

    def _convert(chunks):
        return [array_types.convert(chunk, array_type) for chunk in chunks]

    if chunk_size is not None:
        def iterator():
            for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):
                yield i1, i2, _convert(chunks)
        return iterator()
    return _convert(self.evaluate(column_names, selection=selection, parallel=parallel))
@docsubst
def to_dict(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None, array_type=None):
    """Return a dict containing the ndarray corresponding to the evaluated data

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param parallel: {evaluate_parallel}
    :param chunk_size: {chunk_size}
    :param array_type: {array_type}
    :return: dict
    """
    if not column_names:
        column_names = self.get_column_names(strings=strings, virtual=virtual)

    def _as_dict(chunks):
        return dict(zip(column_names, [array_types.convert(chunk, array_type) for chunk in chunks]))

    if chunk_size is not None:
        def iterator():
            for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):
                yield i1, i2, _as_dict(chunks)
        return iterator()
    return _as_dict(self.evaluate(column_names, selection=selection, parallel=parallel))
@docsubst
def to_copy(self, column_names=None, selection=None, strings=True, virtual=True, selections=True):
    """Return a copy of the DataFrame, if selection is None, it does not copy the data, it just has a reference

    :param column_names: list of column names, to copy, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param selections: copy selections to a new DataFrame
    :return: DataFrame
    """
    if column_names:
        column_names = _ensure_strings_from_expressions(column_names)
    # copy the concrete (non-virtual) data first; virtual columns are re-added below
    df = vaex.from_items(*self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=False))
    if virtual:
        for name, value in self.virtual_columns.items():
            df.add_virtual_column(name, value)
    if selections:
        # the filter selection does not need copying
        for key, value in self.selection_histories.items():
            if key != FILTER_SELECTION_NAME:
                df.selection_histories[key] = list(value)
        for key, value in self.selection_history_indices.items():
            if key != FILTER_SELECTION_NAME:
                df.selection_history_indices[key] = value
    df.functions.update(self.functions)
    df.copy_metadata(self)
    return df
def copy_metadata(self, other):
    """Copy units, descriptions and ucds for all our columns from *other*, plus the DataFrame description."""
    for column_name in self.get_column_names(strings=True):
        for ours, theirs in ((self.units, other.units),
                             (self.descriptions, other.descriptions),
                             (self.ucds, other.ucds)):
            if column_name in theirs:
                ours[column_name] = theirs[column_name]
    self.description = other.description
@docsubst
def to_pandas_df(self, column_names=None, selection=None, strings=True, virtual=True, index_name=None, parallel=True, chunk_size=None):
    """Return a pandas DataFrame containing the ndarray corresponding to the evaluated data

    If index is given, that column is used for the index of the dataframe.

    Example

    >>> df_pandas = df.to_pandas_df(["x", "y", "z"])
    >>> df_copy = vaex.from_pandas(df_pandas)

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param index_name: if this column is given it is used for the index of the DataFrame
    :param parallel: {evaluate_parallel}
    :param chunk_size: {chunk_size}
    :return: pandas.DataFrame object or iterator of
    """
    import pandas as pd
    if not column_names:
        column_names = self.get_column_names(strings=strings, virtual=virtual)
    # the index column must be evaluated too, even when not explicitly requested
    if index_name is not None and index_name not in column_names:
        column_names = column_names + [index_name]

    def _build_frame(data):
        index = data.pop(index_name) if index_name is not None else None
        frame = pd.DataFrame(data=data, index=index)
        if index is not None:
            frame.index.name = index_name
        return frame

    if chunk_size is None:
        return _build_frame(self.to_dict(column_names=column_names, selection=selection, parallel=parallel))

    def iterator():
        for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):
            yield i1, i2, _build_frame(dict(zip(column_names, chunks)))
    return iterator()
@docsubst
def to_arrow_table(self, column_names=None, selection=None, strings=True, virtual=True, parallel=True, chunk_size=None):
    """Returns an arrow Table object containing the arrays corresponding to the evaluated data

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param parallel: {evaluate_parallel}
    :param chunk_size: {chunk_size}
    :return: pyarrow.Table object or iterator of
    """
    from vaex.arrow.convert import arrow_array_from_numpy_array
    import pyarrow as pa
    if not column_names:
        column_names = self.get_column_names(strings=strings, virtual=virtual)

    def _to_table(chunks):
        arrow_arrays = [arrow_array_from_numpy_array(chunk) for chunk in chunks]
        return pa.Table.from_arrays(arrow_arrays, column_names)

    if chunk_size is None:
        return _to_table(self.evaluate(column_names, selection=selection, parallel=parallel))

    def iterator():
        for i1, i2, chunks in self.evaluate_iterator(column_names, selection=selection, parallel=parallel, chunk_size=chunk_size):
            yield i1, i2, _to_table(chunks)
    return iterator()
@docsubst
def to_astropy_table(self, column_names=None, selection=None, strings=True, virtual=True, index=None, parallel=True):
    """Returns a astropy table object containing the ndarrays corresponding to the evaluated data

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param index: if this column is given it is used for the index of the DataFrame
        (note: appears unused in this implementation)
    :return: astropy.table.Table object
    """
    from astropy.table import Table, Column, MaskedColumn
    meta = dict()
    meta["description"] = self.description
    table = Table(meta=meta)
    for name, data in self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual, parallel=parallel):
        if self.is_string(name):  # for astropy we convert it to unicode, it seems to ignore object type
            data = np.array(data).astype('U')
        meta = dict()
        if name in self.ucds:
            meta["ucd"] = self.ucds[name]
        # masked arrays must go into a MaskedColumn to keep the mask intact
        if np.ma.isMaskedArray(data):
            cls = MaskedColumn
        else:
            cls = Column
        table[name] = cls(data, unit=self.unit(name), description=self.descriptions.get(name), meta=meta)
    return table
def to_dask_array(self, chunks="auto"):
    """Lazily expose the DataFrame as a dask.array

    Example

    >>> df = vaex.example()
    >>> A = df[['x', 'y', 'z']].to_dask_array()
    >>> A
    dask.array<vaex-df-1f048b40-10ec-11ea-9553, shape=(330000, 3), dtype=float64, chunksize=(330000, 3), chunktype=numpy.ndarray>
    >>> A+1
    dask.array<add, shape=(330000, 3), dtype=float64, chunksize=(330000, 3), chunktype=numpy.ndarray>

    :param chunks: How to chunk the array, similar to :func:`dask.array.from_array`.
    :return: :class:`dask.array.Array` object.
    """
    import dask.array as da
    import uuid
    dtype = self._dtype  # NOTE(review): presumably a single dtype shared by all columns — confirm
    chunks = da.core.normalize_chunks(chunks, shape=self.shape, dtype=dtype)
    # unique task-graph key so multiple dask arrays from the same df do not collide
    name = 'vaex-df-%s' % str(uuid.uuid1())
    def getitem(df, item):
        # materialize the requested slice; transpose so the result is rows x columns
        return np.array(df.__getitem__(item).to_arrays(parallel=False)).T
    dsk = da.core.getem(name, chunks, getitem=getitem, shape=self.shape, dtype=dtype)
    dsk[name] = self
    return da.Array(dsk, name, chunks, dtype=dtype)
def validate_expression(self, expression):
    """Validate an expression (may throw Exceptions)"""
    # return self.evaluate(expression, 0, 2)
    expr_str = str(expression)
    # a plain virtual column or (for local dfs) a concrete column is always valid
    if expr_str in self.virtual_columns:
        return
    if self.is_local() and expr_str in self.columns:
        return
    known_names = set(self.get_names(hidden=True)) | {'df'}
    known_funcs = set(expression_namespace.keys()) | set(self.functions.keys())
    try:
        return vaex.expresso.validate_expression(expression, known_names, known_funcs)
    except NameError as e:
        # re-raise without chained context for a cleaner error message
        raise NameError(str(e)) from None
def _block_scope(self, i1, i2):
    """Return a _BlockScope for rows [i1, i2) with all variables pre-evaluated."""
    variables = {key: self.evaluate_variable(key) for key in self.variables.keys()}
    return scopes._BlockScope(self, i1, i2, **variables)
def select(self, boolean_expression, mode="replace", name="default"):
    """Select rows based on the boolean_expression, if there was a previous selection, the mode is taken into account.

    if boolean_expression is None, remove the selection, has_selection() will returns false

    Note that per DataFrame, multiple selections are possible, and one filter (see :func:`DataFrame.select`).

    :param str boolean_expression: boolean expression, such as 'x < 0', '(x < 0) || (y > -10)' or None to remove the selection
    :param str mode: boolean operation to perform with the previous selection, "replace", "and", "or", "xor", "subtract"
    :return: None
    """
    # abstract: concrete subclasses must implement the selection mechanics
    raise NotImplementedError
def add_column(self, name, f_or_array, dtype=None):
    """Add an in memory array as a column.

    :param name: name for the new column (sanitized via find_valid_name when needed)
    :param f_or_array: the array holding the data; its length must match the (unfiltered) DataFrame length
    :param dtype: accepted but never read here  # NOTE(review): confirm whether any caller relies on it
    :raises ValueError: when the array length does not match, or f_or_array is not a supported column type
    """
    column_position = len(self.column_names)
    if name in self.get_column_names():
        # replacing an existing column: keep its position and hide the old one
        # under a '__'-prefixed name
        column_position = self.column_names.index(name)
        renamed = '__' +vaex.utils.find_valid_name(name, used=self.get_column_names())
        self._rename(name, renamed)

    if isinstance(f_or_array, supported_column_types):
        data = ar = f_or_array
        # it can be None when we have an 'empty' DataFrameArrays
        if self._length_original is None:
            # first column added: it defines the length of the DataFrame
            self._length_unfiltered = _len(data)
            self._length_original = _len(data)
            self._index_end = self._length_unfiltered
        if _len(ar) != self.length_original():
            if self.filtered:
                # give a better warning to avoid confusion
                if len(self) == len(ar):
                    raise ValueError("Array is of length %s, while the length of the DataFrame is %s due to the filtering, the (unfiltered) length is %s." % (len(ar), len(self), self.length_unfiltered()))
            raise ValueError("array is of length %s, while the length of the DataFrame is %s" % (len(ar), self.length_original()))
        valid_name = vaex.utils.find_valid_name(name, used=self.get_column_names(hidden=True))
        self.columns[valid_name] = ar
        if valid_name not in self.column_names:
            self.column_names.insert(column_position, valid_name)
    else:
        raise ValueError("functions not yet implemented")

    # self._save_assign_expression(valid_name, Expression(self, valid_name))
    self._initialize_column(valid_name)
def _initialize_column(self, name):
    # expose the column as an Expression attribute on the DataFrame
    self._save_assign_expression(name)
def _sparse_matrix(self, column):
    """Return the sparse matrix backing *column* (see :meth:`add_columns`), or None when not sparse."""
    column = _ensure_string_from_expression(column)
    return self._sparse_matrices.get(column)
def add_columns(self, names, columns):
    """Add multiple columns at once, backed by a sparse matrix.

    :param names: list of column names, one per column of the matrix
    :param columns: a :class:`scipy.sparse.csr_matrix` with ``len(names)`` columns
    :raises ValueError: when columns is not a csr_matrix, or its column count does not match names
    """
    from scipy.sparse import csr_matrix  # csc_matrix was imported here but never used
    if isinstance(columns, csr_matrix):
        if len(names) != columns.shape[1]:
            raise ValueError('number of columns ({}) does not match number of column names ({})'.format(columns.shape[1], len(names)))
        for i, name in enumerate(names):
            valid_name = vaex.utils.find_valid_name(name, used=self.get_column_names(hidden=True))
            # each column is a lazy view on column i of the shared sparse matrix
            self.columns[valid_name] = ColumnSparse(columns, i)
            self.column_names.append(valid_name)
            self._sparse_matrices[valid_name] = columns
            self._save_assign_expression(valid_name)
    else:
        raise ValueError('only scipy.sparse.csr_matrix is supported')
def _save_assign_expression(self, name, expression=None):
    """Set an Expression for *name* as attribute on self, unless the attribute is used for something else."""
    obj = getattr(self, name, None)
    # it's ok to set it if it does not exist, or we overwrite an older expression
    if obj is None or isinstance(obj, Expression):
        if expression is None:
            expression = name
        if isinstance(expression, str):
            # sanitize the expression so hidden/renamed columns resolve correctly
            expression = vaex.utils.valid_expression(self.get_column_names(hidden=True), expression)
        expression = Expression(self, expression)
        setattr(self, name, expression)
@_hidden
def add_column_healpix(self, name="healpix", longitude="ra", latitude="dec", degrees=True, healpix_order=12, nest=True):
    """Add a healpix (in memory) column based on a longitude and latitude

    :param name: Name of column
    :param longitude: longitude expression
    :param latitude: latitude expression (astronomical convenction latitude=90 is north pole)
    :param degrees: If lon/lat are in degrees (default) or radians.
    :param healpix_order: healpix order, >= 0
    :param nest: Nested healpix (default) or ring.
    """
    import healpy as hp
    if degrees:
        scale = "*pi/180"
    else:
        scale = ""
    # TODO: multithread this
    phi = self.evaluate("(%s)%s" % (longitude, scale))
    # healpy uses colatitude (0 at the north pole), hence pi/2 - latitude
    theta = self.evaluate("pi/2-(%s)%s" % (latitude, scale))
    hp_index = hp.ang2pix(hp.order2nside(healpix_order), theta, phi, nest=nest)
    # bug fix: the 'name' argument was ignored and the column was always called "healpix"
    self.add_column(name, hp_index)
@_hidden
def add_virtual_columns_matrix3d(self, x, y, z, xnew, ynew, znew, matrix, matrix_name='deprecated', matrix_is_expression=False, translation=[0, 0, 0], propagate_uncertainties=False):
    """Add virtual columns for (xnew, ynew, znew) = matrix @ (x, y, z) + translation.

    :param str x: name of x column
    :param str y:
    :param str z:
    :param str xnew: name of transformed x column
    :param str ynew:
    :param str znew:
    :param list[list] matrix: 2d array or list, with [row,column] order
    :param str matrix_name: unused  # NOTE(review): kept for backwards compatibility, per its default value
    :param translation: sequence of 3 offsets added after the matrix multiplication
    :param propagate_uncertainties: when True, also add uncertainty columns via :meth:`propagate_uncertainties`
    :return:
    """
    # NOTE(review): mutable default for 'translation' — safe only because it is never mutated here
    m = matrix
    x, y, z = self._expr(x, y, z)
    # build each output as a virtual-column expression (row of the matrix times the input vector)
    self[xnew] = m[0][0] * x + m[0][1] * y + m[0][2] * z + translation[0]
    self[ynew] = m[1][0] * x + m[1][1] * y + m[1][2] * z + translation[1]
    self[znew] = m[2][0] * x + m[2][1] * y + m[2][2] * z + translation[2]
    if propagate_uncertainties:
        self.propagate_uncertainties([self[xnew], self[ynew], self[znew]], [x, y, z])
# wrap these with an informative msg
# add_virtual_columns_eq2ecl = _requires('astro')
# add_virtual_columns_eq2gal = _requires('astro')
# add_virtual_columns_distance_from_parallax = _requires('astro')
# add_virtual_columns_cartesian_velocities_to_pmvr = _requires('astro')
# add_virtual_columns_proper_motion_eq2gal = _requires('astro')
# add_virtual_columns_lbrvr_proper_motion2vcartesian = _requires('astro')
# add_virtual_columns_equatorial_to_galactic_cartesian = _requires('astro')
# add_virtual_columns_celestial = _requires('astro')
# add_virtual_columns_proper_motion2vperpendicular = _requires('astro')
def _covariance_matrix_guess(self, columns, full=False, as_expression=False):
    """Build an NxN matrix of expression strings for the covariance of *columns*.

    Uncertainty columns are guessed by naming convention (e.g. ``e_x``, ``x_error``),
    off-diagonals by postfixes such as ``x_y_covariance`` or ``x_y_correlation``.

    :param columns: expressions/names to build the covariance matrix for
    :param full: when True, raise ValueError if an uncertainty/covariance entry cannot be found
    :param as_expression: return Expression objects instead of strings
    """
    all_column_names = self.get_column_names()
    columns = _ensure_strings_from_expressions(columns)

    def _guess(x, y):
        if x == y:
            # diagonal: look for an uncertainty column by naming convention
            postfixes = ["_error", "_uncertainty", "e", "_e"]
            prefixes = ["e", "e_"]
            for postfix in postfixes:
                if x + postfix in all_column_names:
                    return x + postfix
            for prefix in prefixes:
                if prefix + x in all_column_names:
                    return prefix + x
            if full:
                raise ValueError("No uncertainty found for %r" % x)
        else:
            # off-diagonal: try covariance columns first, then correlations
            postfixes = ["_cov", "_covariance"]
            for postfix in postfixes:
                if x + "_" + y + postfix in all_column_names:
                    return x + "_" + y + postfix
                if y + "_" + x + postfix in all_column_names:
                    return y + "_" + x + postfix
            postfixes = ["_correlation", "_corr"]
            for postfix in postfixes:
                # correlation needs multiplying by both uncertainties to become a covariance
                if x + "_" + y + postfix in all_column_names:
                    return x + "_" + y + postfix + " * " + _guess(x, x) + " * " + _guess(y, y)
                if y + "_" + x + postfix in all_column_names:
                    return y + "_" + x + postfix + " * " + _guess(y, y) + " * " + _guess(x, x)
            if full:
                raise ValueError("No covariance or correlation found for %r and %r" % (x, y))
        return "0"
    N = len(columns)
    cov_matrix = [[""] * N for i in range(N)]
    for i in range(N):
        for j in range(N):
            cov = _guess(columns[i], columns[j])
            if i == j and cov:
                cov += "**2"  # square the diagonal
            cov_matrix[i][j] = cov
    if as_expression:
        return [[self[k] for k in row] for row in cov_matrix]
    else:
        return cov_matrix
def _jacobian(self, expressions, variables):
    """Return the Jacobian matrix d(expressions)/d(variables) as Expressions.

    Row i corresponds to ``expressions[i]``, column j to the derivative with
    respect to ``variables[j]``. Each expression is expanded up to (but not
    including) the variable before differentiating.
    """
    expressions = _ensure_strings_from_expressions(expressions)
    jacobian = []
    for expression in expressions:
        row = []
        for variable in variables:
            derivative = self[expression].expand(stop=[variable]).derivative(variable)
            row.append(derivative)
        jacobian.append(row)
    return jacobian
def propagate_uncertainties(self, columns, depending_variables=None, cov_matrix='auto',
                            covariance_format="{}_{}_covariance",
                            uncertainty_format="{}_uncertainty"):
    """Propagates uncertainties (full covariance matrix) for a set of virtual columns.

    Covariance matrix of the depending variables is guessed by finding columns prefixed by "e"
    or `"e_"` or postfixed by "_error", "_uncertainty", "e" and `"_e"`.
    Off diagonals (covariance or correlation) by postfixes with "_correlation" or "_corr" for
    correlation or "_covariance" or "_cov" for covariances.
    (Note that x_y_cov = x_e * y_e * x_y_correlation.)

    Example

    >>> df = vaex.from_scalars(x=1, y=2, e_x=0.1, e_y=0.2)
    >>> df["u"] = df.x + df.y
    >>> df["v"] = np.log10(df.x)
    >>> df.propagate_uncertainties([df.u, df.v])
    >>> df.u_uncertainty, df.v_uncertainty

    :param columns: list of columns for which to calculate the covariance matrix.
    :param depending_variables: If not given, it is found out automatically, otherwise a list of columns which have uncertainties.
    :param cov_matrix: List of list with expressions giving the covariance matrix, in the same order as depending_variables. If 'full' or 'auto',
        the covariance matrix for the depending_variables will be guessed, where 'full' gives an error if an entry was not found.
    """
    names = _ensure_strings_from_expressions(columns)
    virtual_columns = self._expr(*columns, always_list=True)

    # Discover the input variables each target column depends on, unless given.
    if depending_variables is None:
        depending_variables = set()
        for expression in virtual_columns:
            depending_variables |= expression.expand().variables()
        # sorted for a deterministic variable order
        depending_variables = list(sorted(list(depending_variables)))

    fs = [self[self.virtual_columns[name]] for name in names]
    jacobian = self._jacobian(fs, depending_variables)
    m = len(fs)
    n = len(depending_variables)

    # n x n matrix of input covariances (Expressions)
    cov_matrix = self._covariance_matrix_guess(depending_variables, full=cov_matrix == "full", as_expression=True)

    # empty m x m output matrix; Sigma_out = J * Sigma_in * J^T
    cov_matrix_out = [[self['0'] for __ in range(m)] for __ in range(m)]
    for i in range(m):
        for j in range(m):
            for k in range(n):
                for l in range(n):
                    # skip terms that are symbolically zero to keep expressions small
                    if jacobian[i][k].expression == '0' or jacobian[j][l].expression == '0' or cov_matrix[k][l].expression == '0':
                        pass
                    else:
                        cov_matrix_out[i][j] = cov_matrix_out[i][j] + jacobian[i][k] * cov_matrix[k][l] * jacobian[j][l]
    # materialize the lower triangle: covariances off-diagonal, uncertainties on the diagonal
    for i in range(m):
        for j in range(i + 1):
            sigma = cov_matrix_out[i][j]
            sigma = self._expr(vaex.expresso.simplify(_ensure_string_from_expression(sigma)))
            if i != j:
                self.add_virtual_column(covariance_format.format(names[i], names[j]), sigma)
            else:
                self.add_virtual_column(uncertainty_format.format(names[i]), np.sqrt(sigma))
@_hidden
def add_virtual_columns_cartesian_to_polar(self, x="x", y="y", radius_out="r_polar", azimuth_out="phi_polar",
                                           propagate_uncertainties=False,
                                           radians=False):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.cartesian_to_polar``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.cartesian_to_polar(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_cartesian_velocities_to_spherical(self, x="x", y="y", z="z", vx="vx", vy="vy", vz="vz", vr="vr", vlong="vlong", vlat="vlat", distance=None):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.velocity_cartesian2spherical``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.velocity_cartesian2spherical(inplace=True, **kwargs)
def _expr(self, *expressions, **kwargs):
    """Wrap expression strings as Expression objects.

    Returns a single Expression for one argument, unless always_list=True,
    in which case a list is always returned.
    """
    always_list = kwargs.pop('always_list', False)
    if len(expressions) == 1 and not always_list:
        return Expression(self, expressions[0])
    return [Expression(self, expression) for expression in expressions]
@_hidden
def add_virtual_columns_cartesian_velocities_to_polar(self, x="x", y="y", vx="vx", radius_polar=None, vy="vy", vr_out="vr_polar", vazimuth_out="vphi_polar",
                                                      propagate_uncertainties=False,):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.velocity_cartesian2polar``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.velocity_cartesian2polar(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.velocity_polar2cartesian``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.velocity_polar2cartesian(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_rotation(self, x, y, xnew, ynew, angle_degrees, propagate_uncertainties=False):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.rotation_2d``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.rotation_2d(inplace=True, **kwargs)
@docsubst
@_hidden
def add_virtual_columns_spherical_to_cartesian(self, alpha, delta, distance, xname="x", yname="y", zname="z",
                                               propagate_uncertainties=False,
                                               center=[0, 0, 0], radians=False):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.spherical2cartesian``.

    NOTE(review): `center` is a mutable default argument; harmless as long as
    callees never mutate it — confirm before changing.
    """
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.spherical2cartesian(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_cartesian_to_spherical(self, x="x", y="y", z="z", alpha="l", delta="b", distance="distance", radians=False, center=None, center_name="solar_position"):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.cartesian2spherical``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.cartesian2spherical(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_aitoff(self, alpha, delta, x, y, radians=True):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.project_aitoff``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.project_aitoff(inplace=True, **kwargs)
@_hidden
def add_virtual_columns_projection_gnomic(self, alpha, delta, alpha0=0, delta0=0, x="x", y="y", radians=False, postfix=""):
    """Hidden/deprecated alias: forwards all arguments to ``self.geo.project_gnomic``."""
    kwargs = locals().copy()
    kwargs.pop('self')
    return self.geo.project_gnomic(inplace=True, **kwargs)
def add_function(self, name, f, unique=False):
    """Register callable *f* under *name* for use in expressions.

    :param str name: requested name (made unique with a postfix when unique=True)
    :param f: the callable to register
    :param bool unique: avoid clashing with already-registered function names
    :return: the Function wrapper object
    """
    used_names = self.functions.keys() if unique else []
    name = vaex.utils.find_valid_name(name, used=used_names)
    function = vaex.expression.Function(self, name, f)
    self.functions[name] = function
    return function
def add_virtual_column(self, name, expression, unique=False):
    """Add a virtual column to the DataFrame.

    Example:

    >>> df.add_virtual_column("r", "sqrt(x**2 + y**2 + z**2)")
    >>> df.select("r < 10")

    :param str name: name of virtual column
    :param expression: expression for the column
    :param bool unique: if name is already used, make it unique by adding a postfix, e.g. _1, or _2
    """
    type = "change" if name in self.virtual_columns else "add"
    if isinstance(expression, Expression):
        # rebind expressions that belong to another DataFrame onto this one
        if expression.df is not self:
            expression = expression.copy(self)
    column_position = len(self.column_names)
    # if the current name is an existing column name....
    if name in self.get_column_names(hidden=True):
        column_position = self.column_names.index(name)
        renamed = vaex.utils.find_valid_name('__' + name, used=self.get_column_names(hidden=True))
        # we rewrite all existing expressions (including the passed down expression argument)
        self._rename(name, renamed)
    expression = _ensure_string_from_expression(expression)
    if vaex.utils.find_valid_name(name) != name:
        # if we have to rewrite the name, we need to make it unique
        unique = True
    valid_name = vaex.utils.find_valid_name(name, used=[] if not unique else self.get_column_names(hidden=True))
    self.virtual_columns[valid_name] = expression
    self._virtual_expressions[valid_name] = Expression(self, expression)
    if name not in self.column_names:
        # insert at the position of the column we shadowed (or append)
        self.column_names.insert(column_position, valid_name)
    self._save_assign_expression(valid_name)
    self.signal_column_changed.emit(self, valid_name, "add")
    # self.write_virtual_meta()
def rename(self, name, new_name, unique=False):
    """Renames a column or variable, and rewrite expressions such that they refer to the new name"""
    if name == new_name:
        return
    used_names = self.get_column_names(hidden=True) if unique else []
    new_name = vaex.utils.find_valid_name(new_name, used=used_names)
    self._rename(name, new_name, rename_meta_data=True)
    return new_name
def _rename(self, old, new, rename_meta_data=False):
    """Internal rename: updates variables/virtual columns/real columns, metadata,
    selection history, column_names, cached attribute access and all live
    Expression objects so they refer to the new name.
    """
    is_variable = False
    if old in self.variables:
        self.variables[new] = self.variables.pop(old)
        is_variable = True
    elif old in self.virtual_columns:
        self.virtual_columns[new] = self.virtual_columns.pop(old)
        self._virtual_expressions[new] = self._virtual_expressions.pop(old)
    elif self.is_local() and old in self.columns:
        # we only have to do this locally
        # if we don't do this locally, we still store this info
        # in self._renamed_columns, so it will happen at the server
        self.columns[new] = self.columns.pop(old)
    if rename_meta_data:
        for d in [self.ucds, self.units, self.descriptions]:
            if old in d:
                d[new] = d[old]
                del d[old]
    # selections remember expressions too; rewrite their history entries
    for key, value in self.selection_histories.items():
        self.selection_histories[key] = list([k if k is None else k._rename(self, old, new) for k in value])
    if not is_variable:
        if new not in self.virtual_columns:
            # record physical-column renames so remote/lazy backends can replay them
            self._renamed_columns.append((old, new))
        self.column_names[self.column_names.index(old)] = new
        if hasattr(self, old):
            # drop the cached attribute-access Expression for the old name
            try:
                if isinstance(getattr(self, old), Expression):
                    delattr(self, old)
            except:
                pass
        self._save_assign_expression(new)
    # rewrite all live Expression objects (weakrefs; skip collected ones)
    existing_expressions = [k() for k in self._expressions]
    existing_expressions = [k for k in existing_expressions if k is not None]
    for expression in existing_expressions:
        expression._rename(old, new, inplace=True)
    self.virtual_columns = {k:self._virtual_expressions[k].expression for k, v in self.virtual_columns.items()}
def delete_virtual_column(self, name):
    """Deletes a virtual column from a DataFrame."""
    self.virtual_columns.pop(name)
    self._virtual_expressions.pop(name)
    self.signal_column_changed.emit(self, name, "delete")
    # self.write_virtual_meta()
def add_variable(self, name, expression, overwrite=True, unique=True):
    """Add a variable to a DataFrame.

    A variable may refer to other variables, and virtual columns and expression may refer to variables.

    Example

    >>> df.add_variable('center', 0)
    >>> df.add_virtual_column('x_prime', 'x-center')
    >>> df.select('x_prime < 0')

    :param str name: name of virtual variable
    :param expression: expression for the variable
    :param bool overwrite: allow replacing an existing variable of the same name
    :param bool unique: make the name unique (postfix) instead of clashing with existing names
    """
    if unique or overwrite or name not in self.variables:
        existing_names = self.get_column_names(virtual=False) + list(self.variables.keys())
        name = vaex.utils.find_valid_name(name, used=[] if not unique else existing_names)
        self.variables[name] = expression
        self.signal_variable_changed.emit(self, name, "add")
        if unique:
            # the (possibly postfixed) name is only reported when unique=True
            return name
def delete_variable(self, name):
    """Deletes a variable from a DataFrame."""
    self.variables.pop(name)
    self.signal_variable_changed.emit(self, name, "delete")
    # self.write_virtual_meta()
def info(self, description=True):
    """Display an HTML summary of the DataFrame (for Jupyter notebooks)."""
    from IPython import display
    self._output_css()
    html = self._info(description=description)
    display.display(display.HTML(html))
def _info(self, description=True):
    """Build the HTML body used by :meth:`info`: header, column table,
    variable table and a head/tail preview of the data.

    :param bool description: include the description column in the tables
    :return: HTML string
    """
    parts = ["""<div><h2>{}</h2> <b>rows</b>: {:,}</div>""".format(self.name, len(self))]
    if hasattr(self, 'path'):
        parts += ["""<div><b>path</b>: <i>%s</i></div>""" % (self.path)]
    if self.description:
        parts += ["""<div><b>Description</b>: {}</div>""".format(self.description)]
    parts += ["<h2>Columns:</h2>"]
    parts += ["<table class='table-striped'>"]
    parts += ["<thead><tr>"]
    for header in "column type unit description expression".split():
        if description or header != "description":
            parts += ["<th>%s</th>" % header]
    parts += ["</tr></thead>"]
    for name in self.get_column_names():
        parts += ["<tr>"]
        parts += ["<td>%s</td>" % name]
        virtual = name in self.virtual_columns
        if not virtual:
            dtype = str(self.data_type(name)) if self.data_type(name) != str else 'str'
        else:
            # fixed: was "</i>virtual column</i>" (opening tag was a closing tag)
            dtype = "<i>virtual column</i>"
        parts += ["<td>%s</td>" % dtype]
        units = self.unit(name)
        units = units.to_string("latex_inline") if units else ""
        parts += ["<td>%s</td>" % units]
        if description:
            parts += ["<td ><pre>%s</pre></td>" % self.descriptions.get(name, "")]
        if virtual:
            parts += ["<td><code>%s</code></td>" % self.virtual_columns[name]]
        else:
            parts += ["<td></td>"]
        parts += ["</tr>"]
    # append as a one-element list; `parts += "</table>"` would extend char-by-char
    parts += ["</table>"]

    ignore_list = 'pi e km_in_au seconds_per_year'.split()
    variable_names = [name for name in self.variables.keys() if name not in ignore_list]
    if variable_names:
        parts += ["<h2>Variables:</h2>"]
        parts += ["<table class='table-striped'>"]
        parts += ["<thead><tr>"]
        for header in "variable type unit description expression".split():
            if description or header != "description":
                parts += ["<th>%s</th>" % header]
        parts += ["</tr></thead>"]
        for name in variable_names:
            parts += ["<tr>"]
            parts += ["<td>%s</td>" % name]
            type = self.data_type(name).name
            parts += ["<td>%s</td>" % type]
            units = self.unit(name)
            units = units.to_string("latex_inline") if units else ""
            parts += ["<td>%s</td>" % units]
            if description:
                parts += ["<td ><pre>%s</pre></td>" % self.descriptions.get(name, "")]
            parts += ["<td><code>%s</code></td>" % (self.variables[name], )]
            parts += ["</tr>"]
        parts += ["</table>"]

    return "".join(parts) + "<h2>Data:</h2>" + self._head_and_tail_table()
def head(self, n=10):
    """Return a shallow copy a DataFrame with the first n rows."""
    count = min(n, len(self))
    return self[:count]
def tail(self, n=10):
    """Return a shallow copy a DataFrame with the last n rows."""
    N = len(self)
    begin = max(0, N - n)
    return self[begin:N]
def _head_and_tail_table(self, n=5, format='html'):
    """Render the first and last n rows as a table; when the DataFrame is
    short enough (<= 2n rows) render everything in one piece."""
    N = _len(self)
    if N > n * 2:
        return self._as_table(0, n, N - n, N, format=format)
    return self._as_table(0, N, format=format)
def head_and_tail_print(self, n=5):
    """Display the first and last n elements of a DataFrame."""
    from IPython import display
    table = self._head_and_tail_table(n)
    display.display(display.HTML(table))
def describe(self, strings=True, virtual=True, selection=None):
    """Give a description of the DataFrame.

    >>> import vaex
    >>> df = vaex.example()[['x', 'y', 'z']]
    >>> df.describe()
                     x          y          z
    dtype      float64    float64    float64
    count       330000     330000     330000
    missing          0          0          0
    mean    -0.0671315 -0.0535899  0.0169582
    std        7.31746    7.78605    5.05521
    min       -128.294   -71.5524   -44.3342
    max        271.366    146.466    50.7185
    >>> df.describe(selection=df.x > 0)
                       x         y          z
    dtype        float64   float64    float64
    count         164060    164060     164060
    missing       165940    165940     165940
    mean         5.13572 -0.486786 -0.0868073
    std          5.18701   7.61621    5.02831
    min      1.51635e-05  -71.5524   -44.3342
    max          271.366   78.0724    40.2191

    :param bool strings: Describe string columns or not
    :param bool virtual: Describe virtual columns or not
    :param selection: Optional selection to use.
    :return: Pandas dataframe
    """
    import pandas as pd
    N = len(self)
    columns = {}
    for feature in self.get_column_names(strings=strings, virtual=virtual)[:]:
        data_type = self.data_type(feature, array_type='numpy')
        if not isinstance(data_type, np.dtype):
            # non-numpy dtype (e.g. arrow): only string types are supported
            if data_type in array_types.string_types:
                count = self.count(feature, selection=selection, delay=True)
                self.execute()
                count = count.get()
                columns[feature] = ((data_type, count, N-count, '--', '--', '--', '--'))
            else:
                raise NotImplementedError(f'Did not implement describe for data type {data_type}')
        elif data_type.kind in 'SU':
            # TODO: this blocks is the same as the string block above, can we avoid SU types?
            count = self.count(feature, selection=selection, delay=True)
            self.execute()
            count = count.get()
            columns[feature] = ((data_type, count, N-count, '--', '--', '--', '--'))
        elif data_type.kind in 'O':
            # this will also properly count NaN-like objects like NaT
            count_na = self[feature].isna().astype('int').sum(delay=True)
            self.execute()
            count_na = count_na.get()
            columns[feature] = ((data_type, N-count_na, count_na, '--', '--', '--', '--'))
        else:
            # numeric (or datetime) column: schedule all statistics as delayed
            # tasks, then run them in a single pass with execute()
            is_datetime = self.is_datetime(feature)
            mean = self.mean(feature, selection=selection, delay=True)
            std = self.std(feature, selection=selection, delay=True)
            minmax = self.minmax(feature, selection=selection, delay=True)
            if is_datetime:  # this path tests using isna, which test for nat
                count_na = self[feature].isna().astype('int').sum(delay=True)
            else:
                count = self.count(feature, selection=selection, delay=True)
            self.execute()
            if is_datetime:
                count_na, mean, std, minmax = count_na.get(), mean.get(), std.get(), minmax.get()
                count = N - int(count_na)
            else:
                count, mean, std, minmax = count.get(), mean.get(), std.get(), minmax.get()
                count = int(count)
            columns[feature] = ((data_type, count, N-count, mean, std, minmax[0], minmax[1]))
    return pd.DataFrame(data=columns, index=['data_type', 'count', 'NA', 'mean', 'std', 'min', 'max'])
def cat(self, i1, i2, format='html'):
    """Display the DataFrame from row i1 till i2

    For format, see https://pypi.org/project/tabulate/

    :param int i1: Start row
    :param int i2: End row.
    :param str format: Format to use, e.g. 'html', 'plain', 'latex'
    """
    from IPython import display
    if format == 'html':
        display.display(display.HTML(self._as_html_table(i1, i2)))
    else:
        print(self._as_table(i1, i2, format=format))
def _as_table(self, i1, i2, j1=None, j2=None, format='html'):
    """Render rows [i1, i2) (and optionally [j1, j2) with an ellipsis row in
    between) as a table string via tabulate.

    :param int i1: start of the first row range
    :param int i2: end (exclusive) of the first row range
    :param j1: optional start of the second (tail) row range
    :param j2: optional end of the second (tail) row range
    :param str format: tabulate table format, e.g. 'html', 'plain', 'latex'
    :return: the formatted table as a string
    """
    from .formatting import _format_value
    parts = []  # """<div>%s (length=%d)</div>""" % (self.name, len(self))]
    parts += ["<table class='table-striped'>"]
    # we need to get the underlying names since we use df.evaluate
    column_names = self.get_column_names()
    # values_list collects [header, list-of-cell-strings] pairs; entry 0 is the row index
    values_list = []
    values_list.append(['#', []])
    for name in column_names:
        values_list.append([name, []])

    def table_part(k1, k2, parts):
        # Fill values_list with formatted cells for rows [k1, k2).
        N = k2 - k1
        # slicing will invoke .extract which will make the evaluation
        # much quicker
        df = self[k1:k2]
        try:
            values = dict(zip(column_names, df.evaluate(column_names)))
        except:
            # bulk evaluation failed; fall back to per-column evaluation so a
            # single broken column renders as "error" instead of killing the table
            values = {}
            for i, name in enumerate(column_names):
                try:
                    values[name] = df.evaluate(name)
                except:
                    values[name] = ["error"] * (N)
                    logger.exception('error evaluating: %s at rows %i-%i' % (name, k1, k2))
        for i in range(k2 - k1):
            if format == 'html':
                value = "<i style='opacity: 0.6'>{:,}</i>".format(i + k1)
            else:
                value = "{:,}".format(i + k1)
            values_list[0][1].append(value)
            for j, name in enumerate(column_names):
                value = values[name][i]
                value = _format_value(value)
                values_list[j+1][1].append(value)
        # note: table_part mutates values_list and returns None; `parts` is unused
    if i2 - i1 > 0:
        parts = table_part(i1, i2, parts)
        if j1 is not None and j2 is not None:
            # ellipsis row separating head and tail
            values_list[0][1].append('...')
            for i in range(len(column_names)):
                values_list[i+1][1].append('...')
            table_part(j1, j2, parts)
    else:
        for header, values in values_list:
            values.append(None)
    values_list = dict(values_list)
    import tabulate
    table_text = str(tabulate.tabulate(values_list, headers="keys", tablefmt=format))
    if tabulate.__version__ == '0.8.7':
        # Tabulate 0.8.7 escapes html :( — undo the escaping of our index markup.
        # (fixed: the escaped search strings had been HTML-unescaped, producing
        # invalid Python with clashing quotes)
        table_text = table_text.replace('&lt;i style=&#x27;opacity: 0.6&#x27;&gt;', "<i style='opacity: 0.6'>")
        table_text = table_text.replace('&lt;/i&gt;', "</i>")
    if i2 - i1 == 0:
        if self._length_unfiltered != len(self):
            footer_text = 'No rows to display (because of filtering).'
        else:
            footer_text = 'No rows to display.'
        if format == 'html':
            table_text += f'<i>{footer_text}</i>'
        if format == 'plain':
            table_text += f'\n{footer_text}'
    return table_text
def _as_html_table(self, i1, i2, j1=None, j2=None):
    """Render rows [i1, i2) (and optionally [j1, j2), separated by an ellipsis
    row) as a hand-built HTML table string.
    """
    # TODO: this method can be replaced by _as_table
    from .formatting import _format_value
    parts = []  # """<div>%s (length=%d)</div>""" % (self.name, len(self))]
    parts += ["<table class='table-striped'>"]
    column_names = self.get_column_names()
    parts += ["<thead><tr>"]
    for name in ["#"] + column_names:
        parts += ["<th>%s</th>" % name]
    parts += ["</tr></thead>"]

    def table_part(k1, k2, parts):
        # Append one <tr> per row in [k1, k2); broken columns render as "error".
        data_parts = {}
        N = k2 - k1
        for name in column_names:
            try:
                data_parts[name] = self.evaluate(name, i1=k1, i2=k2)
            except:
                data_parts[name] = ["error"] * (N)
                logger.exception('error evaluating: %s at rows %i-%i' % (name, k1, k2))
        for i in range(k2 - k1):
            parts += ["<tr>"]
            parts += ["<td><i style='opacity: 0.6'>{:,}</i></td>".format(i + k1)]
            for name in column_names:
                value = data_parts[name][i]
                value = _format_value(value)
                parts += ["<td>%r</td>" % value]
            parts += ["</tr>"]
        return parts
    parts = table_part(i1, i2, parts)
    if j1 is not None and j2 is not None:
        # ellipsis row between head and tail ranges
        for i in range(len(column_names) + 1):
            parts += ["<td>...</td>"]
        parts = table_part(j1, j2, parts)
    # NOTE(review): this extends `parts` character-by-character; harmless since
    # everything is joined below, but ["</table>"] would be the intended form
    parts += "</table>"
    html = "".join(parts)
    return html
def _output_css(self):
    """Inject the CSS used by the info/description tables into the notebook.

    Long description cells are truncated with an ellipsis and expanded on hover.
    """
    # fixed: the hover rule targeted ".vex-description" (typo) while the base
    # rule targets ".vaex-description", so hovering never expanded the cell
    css = """.vaex-description pre {
      max-width : 450px;
      white-space : nowrap;
      overflow : hidden;
      text-overflow: ellipsis;
    }

    .vaex-description pre:hover {
      max-width : initial;
      white-space: pre;
    }"""
    from IPython import display
    style = "<style>%s</style>" % css
    display.display(display.HTML(style))
def _repr_mimebundle_(self, include=None, exclude=None, **kwargs):
    """Jupyter mimebundle: HTML and plain-text head/tail tables."""
    # TODO: optimize, since we use the same data in both versions
    # TODO: include latex version
    html = self._head_and_tail_table(format='html')
    plain = self._head_and_tail_table(format='plain')
    return {'text/html': html, 'text/plain': plain}
def _repr_html_(self):
    """Representation for Jupyter."""
    self._output_css()
    table = self._head_and_tail_table()
    return table
def __str__(self):
    """Plain-text head/tail table of the DataFrame."""
    return self._head_and_tail_table(format='plain')
# Only define __repr__ in non-debug mode — presumably to keep the default
# object repr in debug sessions (rendering evaluates data); TODO confirm.
if not _DEBUG:
    def __repr__(self):
        """Plain-text head/tail table of the DataFrame."""
        return self._head_and_tail_table(format='plain')
def __current_sequence_index(self):
    """TODO"""
    return 0
def has_current_row(self):
    """Returns True/False if there currently is a picked row."""
    return self._current_row is not None
def get_current_row(self):
    """Individual rows can be 'picked', this is the index (integer) of the current row, or None there is nothing picked."""
    return self._current_row
def set_current_row(self, value):
    """Set the current row, and emit the signal signal_pick."""
    if value is not None:
        if value < 0 or value >= len(self):
            raise IndexError("index %d out of range [0,%d]" % (value, len(self)))
    self._current_row = value
    self.signal_pick.emit(self, value)
def __has_snapshots(self):
    # currently disabled
    return False
def column_count(self, hidden=False):
    """Returns the number of columns (including virtual columns).

    :param bool hidden: If True, include hidden columns in the tally
    :returns: Number of columns in the DataFrame
    """
    names = self.get_column_names(hidden=hidden)
    return len(names)
def get_names(self, hidden=False):
    """Return a list of column names and variable names.

    :param bool hidden: If True, also include hidden names (prefixed with '__').
    """
    names = self.get_column_names(hidden=hidden)
    # fixed: the filter was `not hidden or not k.startswith('__')`, which
    # *included* hidden variables when hidden=False — the opposite of the
    # `hidden` semantics used by get_column_names above.
    return names + [k for k in self.variables.keys() if hidden or not k.startswith('__')]
def get_column_names(self, virtual=True, strings=True, hidden=False, regex=None):
    """Return a list of column names

    Example:

    >>> import vaex
    >>> df = vaex.from_scalars(x=1, x2=2, y=3, s='string')
    >>> df['r'] = (df.x**2 + df.y**2)**2
    >>> df.get_column_names()
    ['x', 'x2', 'y', 's', 'r']
    >>> df.get_column_names(virtual=False)
    ['x', 'x2', 'y', 's']
    >>> df.get_column_names(regex='x.*')
    ['x', 'x2']

    :param virtual: If False, skip virtual columns
    :param hidden: If False, skip hidden columns
    :param strings: If False, skip string columns
    :param regex: Only return column names matching the (optional) regular expression
    :rtype: list of str
    """
    def column_filter(name):
        '''Return True if column with specified name should be returned'''
        if regex and not re.match(regex, name):
            return False
        if not virtual and name in self.virtual_columns:
            return False
        if not strings and self.is_string(name):
            return False
        if not hidden and name.startswith('__'):
            return False
        return True
    # NOTE(review): the quick paths below ignore the `strings` argument;
    # with strings=False and no regex they still return string columns — confirm.
    if hidden and virtual and regex is None:
        return list(self.column_names)  # quick path
    if not hidden and virtual and regex is None:
        return [k for k in self.column_names if not k.startswith('__')]  # also a quick path
    return [name for name in self.column_names if column_filter(name)]
def __bool__(self):
    return True  # we are always true :) otherwise Python might call __len__, which can be expensive
def __len__(self):
    """Returns the number of rows in the DataFrame (filtering applied)."""
    if not self.filtered:
        return self._length_unfiltered
    # filtered: count once, then serve from the cache
    if self._cached_filtered_length is None:
        self._cached_filtered_length = int(self.count())
    return self._cached_filtered_length
def selected_length(self):
    """Returns the number of rows that are selected."""
    # abstract: implemented by concrete (local/remote) subclasses
    raise NotImplementedError
def length_original(self):
    """the full length of the DataFrame, independent what active_fraction is, or filtering. This is the real length of the underlying ndarrays."""
    return self._length_original
def length_unfiltered(self):
    """The length of the arrays that should be considered (respecting active range), but without filtering."""
    return self._length_unfiltered
def active_length(self):
    """Length of the active range (same as length_unfiltered)."""
    return self._length_unfiltered
def get_active_fraction(self):
    """Value in the range (0, 1], to work only with a subset of rows.
    """
    return self._active_fraction
def set_active_fraction(self, value):
    """Sets the active_fraction, set picked row to None, and remove selection.

    TODO: we may be able to keep the selection, if we keep the expression, and also the picked row
    """
    if value != self._active_fraction:
        self._active_fraction = value
        # self._fraction_length = int(self._length * self._active_fraction)
        self.select(None)
        self.set_current_row(None)
        self._length_unfiltered = int(round(self._length_original * self._active_fraction))
        # length changed, so the cached filtered count is stale
        self._cached_filtered_length = None
        self._index_start = 0
        self._index_end = self._length_unfiltered
        self.signal_active_fraction_changed.emit(self, value)
def get_active_range(self):
    """Return the (start, end) row indices of the active range."""
    return self._index_start, self._index_end
def set_active_range(self, i1, i2):
    """Sets the active_fraction, set picked row to None, and remove selection.

    TODO: we may be able to keep the selection, if we keep the expression, and also the picked row
    """
    logger.debug("set active range to: %r", (i1, i2))
    self._active_fraction = (i2 - i1) / float(self.length_original())
    # self._fraction_length = int(self._length * self._active_fraction)
    self._index_start = i1
    self._index_end = i2
    self.select(None)
    self.set_current_row(None)
    self._length_unfiltered = i2 - i1
    # length changed, so the cached filtered count is stale
    self._cached_filtered_length = None
    self.signal_active_fraction_changed.emit(self, self._active_fraction)
@docsubst
def trim(self, inplace=False):
    '''Return a DataFrame, where all columns are 'trimmed' by the active range.

    For the returned DataFrame, df.get_active_range() returns (0, df.length_original()).

    {note_copy}

    :param inplace: {inplace}
    :rtype: DataFrame
    '''
    df = self if inplace else self.copy()
    # nothing to trim when the active range covers everything
    if self._index_start == 0 and self._index_end == self._length_original:
        return df
    df.dataset = self.dataset[self._index_start:self._index_end]
    return df
@docsubst
def take(self, indices, filtered=True, dropfilter=True):
    '''Returns a DataFrame containing only rows indexed by indices

    {note_copy}

    Example:

    >>> import vaex, numpy as np
    >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))
    >>> df.take([0,2])
     #  s      x
     0  a      1
     1  c      3

    :param indices: sequence (list or numpy array) with row numbers
    :param filtered: (for internal use) The indices refer to the filtered data.
    :param dropfilter: (for internal use) Drop the filter, set to False when
        indices refer to unfiltered, but may contain rows that still need to be filtered out.
    :return: DataFrame which is a shallow copy of the original data.
    :rtype: DataFrame
    '''
    df_trimmed = self.trim()
    df = df_trimmed.copy()
    indices = np.asarray(indices)
    if df.filtered and filtered:
        # we translate the indices that refer to filters row indices to
        # indices of the unfiltered row indices
        df.count()  # make sure the mask is filled
        max_index = indices.max()
        mask = df._selection_masks[FILTER_SELECTION_NAME]
        filtered_indices = mask.first(max_index+1)
        indices = filtered_indices[indices]
    df.dataset = df.dataset.take(indices)
    if dropfilter:
        # if the indices refer to the filtered rows, we can discard the
        # filter in the final dataframe
        df.set_selection(None, name=FILTER_SELECTION_NAME)
    return df
@docsubst
def extract(self):
    '''Return a DataFrame containing only the filtered rows.

    {note_copy}

    The resulting DataFrame may be more efficient to work with when the original DataFrame is
    heavily filtered (contains just a small number of rows).

    If no filtering is applied, it returns a trimmed view.
    For the returned df, len(df) == df.length_original() == df.length_unfiltered()

    :rtype: DataFrame
    '''
    trimmed = self.trim()
    if trimmed.filtered:
        self.count()  # make sure the mask is filled
        mask = self._selection_masks[FILTER_SELECTION_NAME]
        indices = mask.first(len(self))
        assert len(indices) == len(self)
        # indices already refer to unfiltered rows, hence filtered=False
        return self.take(indices, filtered=False)
    else:
        return trimmed
@docsubst
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None):
    '''Returns a DataFrame with a random set of rows

    {note_copy}

    Provide either n or frac.

    Example:

    >>> import vaex, numpy as np
    >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))
    >>> df
     #  s      x
     0  a      1
     1  b      2
     2  c      3
     3  d      4
    >>> df.sample(n=2, random_state=42) # 2 random rows, fixed seed
     #  s      x
     0  b      2
     1  d      4
    >>> df.sample(frac=1, random_state=42) # 'shuffling'
     #  s      x
     0  c      3
     1  a      1
     2  d      4
     3  b      2
    >>> df.sample(frac=1, replace=True, random_state=42) # useful for bootstrap (may contain repeated samples)
     #  s      x
     0  d      4
     1  a      1
     2  a      1
     3  d      4

    :param int n: number of samples to take (default 1 if frac is None)
    :param float frac: fractional number of takes to take
    :param bool replace: If true, a row may be drawn multiple times
    :param str or expression weights: (unnormalized) probability that a row can be drawn
    :param int or RandomState: seed or RandomState for reproducibility, when None a random seed it chosen
    :return: {return_shallow_copy}
    :rtype: DataFrame
    '''
    self = self.extract()
    # an int (or None) seed is turned into a RandomState; a RandomState passes through
    if type(random_state) == int or random_state is None:
        random_state = np.random.RandomState(seed=random_state)
    if n is None and frac is None:
        n = 1
    elif frac is not None:
        n = int(round(frac * len(self)))
    weights_values = None
    if weights is not None:
        # normalize the weights so they form a probability distribution
        weights_values = self.evaluate(weights)
        weights_values = weights_values / self.sum(weights)
    indices = random_state.choice(len(self), n, replace=replace, p=weights_values)
    return self.take(indices)
@docsubst
@vaex.utils.gen_to_list
def split_random(self, frac, random_state=None):
    '''Returns a list containing random portions of the DataFrame.

    {note_copy}

    Example:

    >>> import vaex
    >>> import numpy as np
    >>> np.random.seed(111)
    >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> for dfs in df.split_random(frac=0.3, random_state=42):
    ...     print(dfs.x.values)
    ...
    [8 1 5]
    [0 7 2 9 4 3 6]
    >>> for split in df.split_random(frac=[0.2, 0.3, 0.5], random_state=42):
    ...     print(dfs.x.values)
    [8 1]
    [5 0 7]
    [2 9 4 3 6]

    :param int/list frac: If int will split the DataFrame in two portions, the first of which will have size as specified by this parameter. If list, the generator will generate as many portions as elements in the list, where each element defines the relative fraction of that portion.
    :param int random_state: (default, None) Random number seed for reproducibility.
    :return: A list of DataFrames.
    :rtype: list
    '''
    self = self.extract()
    if type(random_state) == int or random_state is None:
        random_state = np.random.RandomState(seed=random_state)
    # shuffle all rows, then cut the shuffled frame into ordered pieces
    indices = random_state.choice(len(self), len(self), replace=False)
    return self.take(indices).split(frac)
@docsubst
@vaex.utils.gen_to_list
def split(self, frac):
    '''Returns a list containing ordered subsets of the DataFrame.

    {note_copy}

    Example:

    >>> import vaex
    >>> df = vaex.from_arrays(x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> for dfs in df.split(frac=0.3):
    ...     print(dfs.x.values)
    ...
    [0 1 2]
    [3 4 5 6 7 8 9]
    >>> for split in df.split(frac=[0.2, 0.3, 0.5]):
    ...     print(dfs.x.values)
    [0 1]
    [2 3 4]
    [5 6 7 8 9]

    :param int/list frac: If int will split the DataFrame in two portions, the first of which will have size as specified by this parameter. If list, the generator will generate as many portions as elements in the list, where each element defines the relative fraction of that portion.
    :return: A list of DataFrames.
    :rtype: list
    '''
    self = self.extract()
    if _issequence(frac):
        # make sure it is normalized
        total = sum(frac)
        frac = [k / total for k in frac]
    else:
        assert frac <= 1, "fraction should be <= 1"
        frac = [frac, 1 - frac]
    # cumulative fractions -> row offsets of each split boundary
    offsets = np.round(np.cumsum(frac) * len(self)).astype(np.int64)
    start = 0
    for offset in offsets:
        yield self[start:offset]
        start = offset
@docsubst
def sort(self, by, ascending=True, kind='quicksort'):
    '''Return a sorted DataFrame, sorted by the expression 'by'

    The kind keyword is ignored if doing multi-key sorting.

    {note_copy}

    {note_filter}

    Example:

    >>> import vaex, numpy as np
    >>> df = vaex.from_arrays(s=np.array(['a', 'b', 'c', 'd']), x=np.arange(1,5))
    >>> df['y'] = (df.x-1.8)**2
    >>> df
     #  s      x     y
     0  a      1  0.64
     1  b      2  0.04
     2  c      3  1.44
     3  d      4  4.84
    >>> df.sort('y', ascending=False)  # Note: passing '(x-1.8)**2' gives the same result
     #  s      x     y
     0  d      4  4.84
     1  c      3  1.44
     2  a      1  0.64
     3  b      2  0.04

    :param str or expression or list by: (list of) expression(s) to sort by
    :param bool ascending: ascending (default, True) or descending (False)
    :param str kind: kind of algorithm to use (passed to numpy.argsort); ignored for multi-key sorts
    '''
    self = self.trim()
    # fixed: a tuple `by` previously ran BOTH branches — a full evaluate +
    # argsort whose result was immediately discarded before the lexsort.
    if isinstance(by, (list, tuple)):
        # multi-key sort: np.lexsort treats the LAST key as primary, so reverse
        by = _ensure_strings_from_expressions(by)[::-1]
        values = self.evaluate(by)
        indices = np.lexsort(values)
    else:
        values = self.evaluate(by)
        indices = np.argsort(values, kind=kind)
    if not ascending:
        indices = indices[::-1].copy()  # this may be used a lot, so copy for performance
    return self.take(indices)
@docsubst
def fillna(self, value, column_names=None, prefix='__original_', inplace=False):
    '''Return a DataFrame, where missing values/NaN are filled with 'value'.

    {note_copy}

    {note_filter}

    Example:

    >>> import vaex
    >>> import numpy as np
    >>> x = np.array([3, 1, np.nan, 10, np.nan])
    >>> df = vaex.from_arrays(x=x)
    >>> df_filled = df.fillna(value=-1, column_names=['x'])
    >>> df_filled
    #    x
    0    3
    1    1
    2   -1
    3   10
    4   -1

    :param float value: The value to use for filling nan or masked values.
    :param list column_names: List of column names in which to fill missing values.
    :param str prefix: The prefix to give the original columns.
        NOTE(review): this parameter is currently unused by the implementation —
        confirm whether originals should be preserved under this prefix.
    :param inplace: {inplace}
    '''
    df = self.trim(inplace=inplace)
    column_names = column_names or list(self)
    for name in column_names:
        # Wrap each requested column in the fillna function expression; the column
        # is replaced by a virtual column, so no data is lost.
        # (Removed: an unused `column = df.columns.get(name)` lookup.)
        df[name] = df.func.fillna(df[name], value)
    return df
def materialize(self, virtual_column, inplace=False):
    '''Returns a new DataFrame where the virtual column is turned into an in memory numpy array.

    Example:

    >>> x = np.arange(1,4)
    >>> y = np.arange(2,5)
    >>> df = vaex.from_arrays(x=x, y=y)
    >>> df['r'] = (df.x**2 + df.y**2)**0.5 # 'r' is a virtual column (computed on the fly)
    >>> df = df.materialize('r') # now 'r' is a 'real' column (i.e. a numpy array)

    :param inplace: {inplace}
    '''
    df = self.trim(inplace=inplace)
    name = _ensure_string_from_expression(virtual_column)
    if name not in df.virtual_columns:
        raise KeyError('Virtual column not found: %r' % name)
    # evaluate unfiltered so the materialized array lines up with the backing store
    data = df.evaluate(name, filtered=False)
    del df[name]
    df.add_column(name, data)
    return df
def _lazy_materialize(self, *virtual_columns):
    '''Returns a new DataFrame where the virtual columns are turned into lazily evaluated columns.

    :param virtual_columns: names (or expressions) of virtual columns to convert
    :raises KeyError: if a name is not a virtual column
    '''
    df = self.trim()
    # consistent with the plural helper used elsewhere in this module
    virtual_columns = _ensure_strings_from_expressions(virtual_columns)
    for name in virtual_columns:
        if name not in df.virtual_columns:
            # Fix: previously referenced the undefined name `virtual_column`,
            # which raised NameError instead of the intended KeyError.
            raise KeyError('Virtual column not found: %r' % name)
        column = ColumnConcatenatedLazy(self[name])
        # Fix: `del df[virtual_column]` also referenced the undefined name.
        del df[name]
        df.add_column(name, column)
    return df
def get_selection(self, name="default"):
    """Get the current selection object (mostly for internal use atm)."""
    key = _normalize_selection_name(name)
    history = self.selection_histories[key]
    position = self.selection_history_indices[key]
    # position -1 means "no selection active" in this slot
    return None if position == -1 else history[position]
def selection_undo(self, name="default", executor=None):
    """Undo selection, for the name.

    Moves the history pointer for the named selection slot one step back and
    notifies listeners; callers must ensure :meth:`selection_can_undo` holds.

    :param str name: history tree or selection 'slot' to use
    :param executor: executor to use, defaults to self.executor
    """
    logger.debug("undo")
    executor = executor or self.executor
    assert self.selection_can_undo(name=name)
    selection_history = self.selection_histories[name]
    # (Removed an unused local that read the pre-decrement index.)
    self.selection_history_indices[name] -= 1
    self.signal_selection_changed.emit(self, name)
    logger.debug("undo: selection history is %r, index is %r", selection_history, self.selection_history_indices[name])
def selection_redo(self, name="default", executor=None):
    """Redo selection, for the name.

    Moves the history pointer for the named selection slot one step forward and
    notifies listeners; callers must ensure :meth:`selection_can_redo` holds.

    :param str name: history tree or selection 'slot' to use
    :param executor: executor to use, defaults to self.executor
    """
    logger.debug("redo")
    executor = executor or self.executor
    assert self.selection_can_redo(name=name)
    selection_history = self.selection_histories[name]
    index = self.selection_history_indices[name]
    # (Removed `next = selection_history[index + 1]`: the value was never used
    # and the name shadowed the builtin `next`.)
    self.selection_history_indices[name] += 1
    self.signal_selection_changed.emit(self, name)
    logger.debug("redo: selection history is %r, index is %r", selection_history, index)
def selection_can_undo(self, name="default"):
    """Can selection name be undone?"""
    position = self.selection_history_indices[name]
    return position >= 0
def selection_can_redo(self, name="default"):
    """Can selection name be redone?"""
    pending = self.selection_history_indices[name] + 1
    return pending < len(self.selection_histories[name])
def select(self, boolean_expression, mode="replace", name="default", executor=None):
    """Perform a selection, defined by the boolean expression, and combined with the previous selection using the given mode.

    Selections are recorded in a history tree, per name, undo/redo can be done for them separately.

    :param str boolean_expression: Any valid column expression, with comparison operators
    :param str mode: Possible boolean operator: replace/and/or/xor/subtract
    :param str name: history tree or selection 'slot' to use
    :param executor:
    :return:
    """
    boolean_expression = _ensure_string_from_expression(boolean_expression)
    if boolean_expression is None and not self.has_selection(name=name):
        pass  # we don't want to pollute the history with many None selections
        self.signal_selection_changed.emit(self, name)  # TODO: unittest want to know, does this make sense?
    else:
        # lazily build the selection from the current one; None clears the slot
        def create(current):
            return selections.SelectionExpression(boolean_expression, current, mode) if boolean_expression else None
        self._selection(create, name)
def select_non_missing(self, drop_nan=True, drop_masked=True, column_names=None, mode="replace", name="default"):
    """Create a selection that selects rows having non missing values for all columns in column_names.

    The name reflects Pandas, no rows are really dropped, but a mask is kept to keep track of the selection

    :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)
    :param drop_masked: drop rows when there is a masked value in any of the columns
    :param column_names: The columns to consider, default: all (real, non-virtual) columns
    :param str mode: Possible boolean operator: replace/and/or/xor/subtract
    :param str name: history tree or selection 'slot' to use
    :return:
    """
    column_names = column_names or self.get_column_names(virtual=False)

    def build(current):
        # combine with the current selection according to `mode`
        return selections.SelectionDropNa(drop_nan, drop_masked, column_names, current, mode)
    self._selection(build, name)
def dropmissing(self, column_names=None):
    """Create a shallow copy of a DataFrame, with filtering set using ismissing.

    :param column_names: The columns to consider, default: all (real, non-virtual) columns
    :rtype: DataFrame
    """
    predicate = self.func.ismissing
    return self._filter_all(predicate, column_names)
def dropnan(self, column_names=None):
    """Create a shallow copy of a DataFrame, with filtering set using isnan.

    :param column_names: The columns to consider, default: all (real, non-virtual) columns
    :rtype: DataFrame
    """
    predicate = self.func.isnan
    return self._filter_all(predicate, column_names)
def dropna(self, column_names=None):
    """Create a shallow copy of a DataFrame, with filtering set using isna.

    :param column_names: The columns to consider, default: all (real, non-virtual) columns
    :rtype: DataFrame
    """
    predicate = self.func.isna
    return self._filter_all(predicate, column_names)
def dropinf(self, column_names=None):
    """Create a shallow copy of a DataFrame, with filtering set using isinf.

    :param column_names: The columns to consider, default: all (real, non-virtual) columns
    :rtype: DataFrame
    """
    predicate = self.func.isinf
    return self._filter_all(predicate, column_names)
def _filter_all(self, f, column_names=None):
column_names = column_names or self.get_column_names(virtual=False)
expression = f(self[column_names[0]])
for column in column_names[1:]:
expression = expression | f(self[column])
return self.filter(~expression, mode='and')
def select_nothing(self, name="default"):
    """Select nothing (clear the named selection slot)."""
    logger.debug("selecting nothing")
    self.select(None, name=name)
    # notify listeners that the selection in this slot changed
    self.signal_selection_changed.emit(self, name)
def select_rectangle(self, x, y, limits, mode="replace", name="default"):
    """Select a 2d rectangular box in the space given by x and y, bounded by limits.

    Example:

    >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])

    :param x: expression for the x space
    :param y: expression for the y space
    :param limits: sequence of shape [(x1, x2), (y1, y2)]
    :param mode: Possible boolean operator: replace/and/or/xor/subtract
    """
    self.select_box([x, y], limits, mode=mode, name=name)
def select_box(self, spaces, limits, mode="replace", name="default"):
    """Select a n-dimensional rectangular box bounded by limits.

    The following examples are equivalent:

    >>> df.select_box(['x', 'y'], [(0, 10), (0, 1)])
    >>> df.select_rectangle('x', 'y', [(0, 10), (0, 1)])

    :param spaces: list of expressions
    :param limits: sequence of shape [(x1, x2), (y1, y2)]
    :param mode:
    :param name:
    :return:
    """
    clauses = []
    for expression, pair in zip(spaces, limits):
        # each limit pair may come in any order; normalize to (low, high)
        low, high = min(pair), max(pair)
        clauses.append("((%s) >= %f) & ((%s) <= %f)" % (expression, low, expression, high))
    self.select("&".join(clauses), mode=mode, name=name)
def select_circle(self, x, y, xc, yc, r, mode="replace", name="default", inclusive=True):
    """
    Select a circular region centred on xc, yc, with a radius of r.

    Example:

    >>> df.select_circle('x','y',2,3,1)

    :param x: expression for the x space
    :param y: expression for the y space
    :param xc: location of the centre of the circle in x
    :param yc: location of the centre of the circle in y
    :param r: the radius of the circle
    :param name: name of the selection
    :param mode:
    :return:
    """
    # squared distance from the centre, compared against r**2 (no sqrt needed)
    distance2 = (self[x] - xc)**2 + (self[y] - yc)**2
    if inclusive:
        expr = distance2 <= r**2
    else:
        expr = distance2 < r**2
    self.select(boolean_expression=expr, mode=mode, name=name)
def select_ellipse(self, x, y, xc, yc, width, height, angle=0, mode="replace", name="default", radians=False, inclusive=True):
    """
    Select an elliptical region centred on xc, yc, with a certain width, height
    and angle.

    Example:

    >>> df.select_ellipse('x','y', 2, -1, 5,1, 30, name='my_ellipse')

    :param x: expression for the x space
    :param y: expression for the y space
    :param xc: location of the centre of the ellipse in x
    :param yc: location of the centre of the ellipse in y
    :param width: the width of the ellipse (diameter)
    :param height: the height of the ellipse (diameter)
    :param angle: orientation of the ellipse, counter-clockwise
                  measured from the y axis (degrees, unless radians=True)
    :param name: name of the selection
    :param mode:
    :param bool radians: interpret `angle` as radians instead of degrees
    :param bool inclusive: include the boundary of the ellipse
    :return:
    """
    # Computing the properties of the ellipse prior to selection.
    # Fix: when radians=True the original left `alpha` unset, raising NameError.
    if radians:
        alpha = angle
    else:
        alpha = np.deg2rad(angle)
    xr = width / 2
    yr = height / 2
    r = max(xr, yr)
    a = xr / r
    b = yr / r
    # (Removed a dead string-built `expr` that was immediately overwritten below.)
    if inclusive:
        expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))**2 / b**2 <= r**2
    else:
        expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))**2 / a**2 + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))**2 / b**2 < r**2
    self.select(boolean_expression=expr, mode=mode, name=name)
def select_lasso(self, expression_x, expression_y, xsequence, ysequence, mode="replace", name="default", executor=None):
    """For performance reasons, a lasso selection is handled differently.

    :param str expression_x: Name/expression for the x coordinate
    :param str expression_y: Name/expression for the y coordinate
    :param xsequence: list of x numbers defining the lasso, together with y
    :param ysequence:
    :param str mode: Possible boolean operator: replace/and/or/xor/subtract
    :param str name:
    :param executor:
    :return:
    """
    def build(current):
        # the lasso polygon is combined with the current selection per `mode`
        return selections.SelectionLasso(expression_x, expression_y, xsequence, ysequence, current, mode)
    self._selection(build, name, executor=executor)
def select_inverse(self, name="default", executor=None):
    """Invert the selection, i.e. what is selected will not be, and vice versa

    :param str name:
    :param executor:
    :return:
    """
    def build(current):
        return selections.SelectionInvert(current)
    self._selection(build, name, executor=executor)
def set_selection(self, selection, name="default", executor=None):
    """Sets the selection object

    :param selection: Selection object
    :param name: selection 'slot'
    :param executor:
    :return:
    """
    # the builder ignores the current selection and installs `selection` as-is
    self._selection(lambda current: selection, name, executor=executor, execute_fully=True)
def _selection(self, create_selection, name, executor=None, execute_fully=False):
    """select_lasso and select almost share the same code

    Appends the selection built by `create_selection(current)` to the history of
    slot `name`, advances the history pointer, clips any redo entries, and
    notifies listeners. Returns a fulfilled promise (the selection itself is lazy).
    """
    selection_history = self.selection_histories[name]
    previous_index = self.selection_history_indices[name]
    # the currently active selection (None when the history is empty)
    current = selection_history[previous_index] if selection_history else None
    selection = create_selection(current)
    executor = executor or self.executor
    selection_history.append(selection)
    self.selection_history_indices[name] += 1
    # clip any redo history; the [index:-1] slice removes the stale redo entries
    # while keeping the selection just appended at the end
    del selection_history[self.selection_history_indices[name]:-1]
    self.signal_selection_changed.emit(self, name)
    result = vaex.promise.Promise.fulfilled(None)
    logger.debug("select selection history is %r, index is %r", selection_history, self.selection_history_indices[name])
    return result
def has_selection(self, name="default"):
    """Returns True if there is a selection with the given name."""
    selection = self.get_selection(name)
    return selection is not None
def __setitem__(self, name, value):
    '''Convenient way to add a virtual column / expression to this DataFrame.

    Example:

    >>> import vaex, numpy as np
    >>> df = vaex.example()
    >>> df['r'] = np.sqrt(df.x**2 + df.y**2 + df.z**2)
    >>> df.r
    <vaex.expression.Expression(expressions='r')> instance at 0x121687e80 values=[2.9655450396553587, 5.77829281049018, 6.99079603950256, 9.431842752707537, 0.8825613121347967 ... (total 330000 values) ... 7.453831761514681, 15.398412491068198, 8.864250273925633, 17.601047186042507, 14.540181524970293]
    '''
    # guard clause: only string keys are supported
    if not isinstance(name, six.string_types):
        raise TypeError('__setitem__ only takes strings as arguments, not {}'.format(type(name)))
    if isinstance(value, supported_column_types):
        # concrete data (array-like) becomes a real column
        self.add_column(name, value)
    else:
        # anything else is treated as an expression / virtual column
        self.add_virtual_column(name, value)
def drop_filter(self, inplace=False):
    """Removes all filters from the DataFrame"""
    target = self if inplace else self.copy()
    # clearing the dedicated filter slot removes the filter
    target.select_nothing(name=FILTER_SELECTION_NAME)
    target._invalidate_caches()
    return target
def filter(self, expression, mode="and"):
    """General version of df[<boolean expression>] to modify the filter applied to the DataFrame.

    See :func:`DataFrame.select` for usage of selection.

    Note that using `df = df[<boolean expression>]`, one can only narrow the filter (i.e. only less rows
    can be selected). Using the filter method, and a different boolean mode (e.g. "or") one can actually
    cause more rows to be selected. This differs greatly from numpy and pandas for instance, which can only
    narrow the filter.

    Example:

    >>> import vaex
    >>> import numpy as np
    >>> x = np.arange(10)
    >>> df = vaex.from_arrays(x=x, y=x**2)
    >>> df
    #    x    y
    0    0    0
    1    1    1
    2    2    4
    3    3    9
    4    4   16
    5    5   25
    6    6   36
    7    7   49
    8    8   64
    9    9   81
    >>> dff = df[df.x<=2]
    >>> dff
    #    x    y
    0    0    0
    1    1    1
    2    2    4
    >>> dff = dff.filter(dff.x >=7, mode="or")
    >>> dff
    #    x    y
    0    0    0
    1    1    1
    2    2    4
    3    7   49
    4    8   64
    5    9   81

    :param expression: boolean expression to (re)combine with the current filter
    :param str mode: Possible boolean operator: replace/and/or/xor/subtract
    :rtype: DataFrame
    """
    df = self.copy()
    df.select(expression, name=FILTER_SELECTION_NAME, mode=mode)
    df._cached_filtered_length = None  # invalidate cached length
    # WARNING: this is a special case where we create a new filter
    # the cache mask chunks still hold references to views on the old
    # mask, and this new mask will be filled when required
    df._selection_masks[FILTER_SELECTION_NAME] = vaex.superutils.Mask(df._length_unfiltered)
    return df
def __getitem__(self, item):
    """Convenient way to get expressions, (shallow) copies of a few columns, or to apply filtering.

    Example:

    >>> df['Lz']  # the expression 'Lz
    >>> df['Lz/2'] # the expression 'Lz/2'
    >>> df[["Lz", "E"]] # a shallow copy with just two columns
    >>> df[df.Lz < 0]  # a shallow copy with the filter Lz < 0 applied
    """
    if isinstance(item, int):
        # integer index: return one row as a list of values, one per column
        names = self.get_column_names()
        return [self.evaluate(name, item, item+1)[0] for name in names]
    elif isinstance(item, six.string_types):
        # string: return an Expression; reuse an existing attribute expression if present
        if hasattr(self, item) and isinstance(getattr(self, item), Expression):
            return getattr(self, item)
        # if item in self.virtual_columns:
        #   return Expression(self, self.virtual_columns[item])
        # if item in self._virtual_expressions:
        #   return self._virtual_expressions[item]
        item = vaex.utils.valid_expression(self.get_column_names(), item)
        self.validate_expression(item)
        return Expression(self, item)  # TODO we'd like to return the same expression if possible
    elif isinstance(item, Expression):
        # boolean expression: apply it as a filter
        expression = item.expression
        return self.filter(expression)
    elif isinstance(item, (tuple, list)):
        df = self
        # df[slice, col] style: first apply the row slice, then resolve the column part
        if isinstance(item[0], slice):
            df = df[item[0]]
            if len(item) > 1:
                if isinstance(item[1], int):
                    name = self.get_column_names()[item[1]]
                    return df[name]
                elif isinstance(item[1], slice):
                    names = self.get_column_names().__getitem__(item[1])
                    return df[names]
        # otherwise: a list of expressions -> shallow copy with just those columns
        for expression in item:
            self.validate_expression(expression)
        df = self.copy(column_names=item)
        return df
    elif isinstance(item, slice):
        # row slice: normalize negative/None bounds, then trim to the active range
        start, stop, step = item.start, item.stop, item.step
        start = start or 0
        stop = stop or len(self)
        if start < 0:
            start = len(self)+start
        if stop < 0:
            stop = len(self)+stop
        stop = min(stop, len(self))
        assert step in [None, 1]
        if self.filtered:
            # translate filtered row positions to unfiltered mask indices
            count_check = self.count()  # fill caches and masks
            mask = self._selection_masks[FILTER_SELECTION_NAME]
            start, stop = mask.indices(start, stop-1)  # -1 since it is inclusive
            assert start != -1
            assert stop != -1
            stop = stop+1  # +1 to make it inclusive
        df = self.trim()
        df.set_active_range(start, stop)
        return df.trim()
def __delitem__(self, item):
    '''Removes a (virtual) column from the DataFrame.

    Note: this does not check if the column is used in a virtual expression or in the filter\
    and may lead to issues. It is safer to use :meth:`drop`.
    '''
    if isinstance(item, Expression):
        name = item.expression
    else:
        name = item
    if name in self.columns:
        del self.columns[name]
        self.column_names.remove(name)
    elif name in self.virtual_columns:
        del self.virtual_columns[name]
        del self._virtual_expressions[name]
        self.column_names.remove(name)
    else:
        raise KeyError('no such column or virtual_columns named %r' % name)
    self.signal_column_changed.emit(self, name, "delete")
    if hasattr(self, name):
        # best-effort removal of the attribute shortcut (df.<name>); never let
        # this step undo a successful deletion
        try:
            if isinstance(getattr(self, name), Expression):
                delattr(self, name)
        except Exception:  # was a bare `except:`, which would also swallow KeyboardInterrupt/SystemExit
            pass
@docsubst
def drop(self, columns, inplace=False, check=True):
    """Drop columns (or a single column).

    :param columns: List of columns or a single column name
    :param inplace: {inplace}
    :param check: When true, it will check if the column is used in virtual columns or the filter, and hide it instead.
    """
    names = _ensure_strings_from_expressions(_ensure_list(columns))
    df = self if inplace else self.copy()
    # columns still referenced by other virtual columns / the filter are hidden, not deleted
    in_use = df._depending_columns(columns_exclude=names)
    for name in names:
        if check and name in in_use:
            df._hide_column(name)
        else:
            del df[name]
    return df
def _hide_column(self, column):
    '''Hides a column by prefixing the name with \'__\''''
    name = _ensure_string_from_expression(column)
    hidden_name = self._find_valid_name('__' + name)
    self._rename(name, hidden_name)
    return hidden_name
def _find_valid_name(self, initial_name):
    '''Finds a non-colliding name by optional postfixing'''
    used = self.get_column_names(hidden=True)
    return vaex.utils.find_valid_name(initial_name, used=used)
def _depending_columns(self, columns=None, columns_exclude=None, check_filter=True):
'''Find all depending column for a set of column (default all), minus the excluded ones'''
columns = set(columns or self.get_column_names(hidden=True))
if columns_exclude:
columns -= set(columns_exclude)
depending_columns = set()
for column in columns:
expression = self._expr(column)
depending_columns |= expression.variables()
depending_columns -= set(columns)
if check_filter:
if self.filtered:
selection = self.get_selection(FILTER_SELECTION_NAME)
depending_columns |= selection._depending_columns(self)
return depending_columns
def iterrows(self):
    """Yield (row_index, row_dict) pairs, one per row (slow: evaluates per row)."""
    names = self.get_column_names()
    for index in range(len(self)):
        row = {name: self.evaluate(name, index, index + 1)[0] for name in names}
        yield index, row
def __iter__(self):
    """Iterator over the column names."""
    # iterate over a copy of the names, so mutating the DataFrame while
    # iterating does not invalidate the iterator
    return iter(list(self.get_column_names()))
def _root_nodes(self):
    """Returns a list of string which are the virtual columns that are not used in any other virtual column."""
    # these lists (~used as ordered set) keep track of leafes and root nodes
    # root nodes
    root_nodes = []
    leafes = []

    def walk(node):
        # this function recursively walks the expression graph
        if isinstance(node, six.string_types):
            # we end up at a leaf
            leafes.append(node)
            if node in root_nodes:  # so it cannot be a root node
                root_nodes.remove(node)
        else:
            node_repr, fname, fobj, deps = node
            if node_repr in self.virtual_columns:
                # we encountered a virtual column, similar behaviour as leaf
                leafes.append(node_repr)
                if node_repr in root_nodes:
                    root_nodes.remove(node_repr)
            # recursive part
            for dep in deps:
                walk(dep)
    for column in self.virtual_columns.keys():
        if column not in leafes:
            root_nodes.append(column)
        node = self[column]._graph()
        # we don't do the virtual column itself, just its dependencies
        node_repr, fname, fobj, deps = node
        for dep in deps:
            walk(dep)
    return root_nodes
def _graphviz(self, dot=None):
    """Return a graphviz.Digraph object with a graph of all virtual columns"""
    from graphviz import Digraph
    graph = dot or Digraph(comment='whole dataframe')
    # only the roots need to be drawn; each draws its own dependency subtree
    for column in self._root_nodes():
        self[column]._graphviz(dot=graph)
    return graph
def _get_task_agg(self, grid):
    # Return (task, is_new) for the aggregation task belonging to `grid`,
    # creating one under the executor lock when absent or already scheduled away.
    new_task = False
    with self.executor.lock:
        # if we did not create a task yet for this grid, or it was already scheduled for execution
        if grid not in self._task_aggs or self._task_aggs[grid] not in self.executor.tasks:
            # we create a new one
            self._task_aggs[grid] = vaex.tasks.TaskAggregations(self, grid)
            new_task = True
    return self._task_aggs[grid], new_task
@docsubst
@stat_1d
def _agg(self, aggregator, grid, selection=False, delay=False, progress=None):
    """
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}
    """
    # reuse (or create) the aggregation task for this grid and attach our operations
    task, created = self._get_task_agg(grid)
    subtask = aggregator.add_operations(task)
    if created:
        # only schedule a task once; later aggregations piggy-back on it
        self.executor.schedule(task)
    return self._delay(delay, subtask)
def _binner(self, expression, limits=None, shape=None, selection=None, delay=False):
    # Build (or reuse) a binner for `expression`. Results are cached per
    # (expression, limits, shape) key as promises, so repeated requests share one binner.
    expression = str(expression)
    if limits is not None and not isinstance(limits, (tuple, str)):
        limits = tuple(limits)  # make limits hashable for the cache key
    key = (expression, limits, shape)
    if key not in self._binners:
        if expression in self._categories:
            # categorical column: bin directly on the ordinal codes
            N = self._categories[expression]['N']
            min_value = self._categories[expression]['min_value']
            binner = self._binner_ordinal(expression, N, min_value)
            self._binners[key] = vaex.promise.Promise.fulfilled(binner)
        else:
            self._binners[key] = vaex.promise.Promise()

            @delayed
            def create_binner(limits):
                return self._binner_scalar(expression, limits, shape)
            # the limits themselves may need to be computed asynchronously first
            self._binners[key] = create_binner(self.limits(expression, limits, selection=selection, delay=True))
    return self._delay(delay, self._binners[key])
def _grid(self, binners):
key = tuple(binners)
if key in self._grids:
return self._grids[key]
else:
self._grids[key] = grid = vaex.superagg.Grid(binners)
return grid
def _binner_scalar(self, expression, limits, shape):
    # Look up the dtype-specialized BinnerScalar_* class for this expression.
    # (Renamed the local from `type`, which shadowed the builtin.)
    binner_cls = vaex.utils.find_type_from_dtype(vaex.superagg, "BinnerScalar_", self.data_type(expression))
    vmin, vmax = limits
    return binner_cls(expression, vmin, vmax, shape)
def _binner_ordinal(self, expression, ordinal_count, min_value=0):
    # Look up the dtype-specialized BinnerOrdinal_* class for this expression.
    # (Renamed the local from `type`, which shadowed the builtin.)
    expression = _ensure_string_from_expression(expression)
    binner_cls = vaex.utils.find_type_from_dtype(vaex.superagg, "BinnerOrdinal_", self.data_type(expression))
    return binner_cls(expression, ordinal_count, min_value)
def _create_grid(self, binby, limits, shape, selection=None, delay=False):
    # Normalize `binby` to a list of validated expression strings, create one
    # binner per expression (possibly asynchronously), then combine into a Grid.
    if isinstance(binby, (list, tuple)):
        binbys = binby
    else:
        binbys = [binby]
    binbys = _ensure_strings_from_expressions(binbys)
    for expression in binbys:
        if expression:
            self.validate_expression(expression)
    binners = []
    if len(binbys):
        # broadcast limits/shape so each binby expression gets its own pair
        limits = _expand_limits(limits, len(binbys))
    else:
        limits = []
    shapes = _expand_shape(shape, len(binbys))
    for binby, limits1, shape in zip(binbys, limits, shapes):
        binners.append(self._binner(binby, limits1, shape, selection, delay=True))

    @delayed
    def finish(*binners):
        return self._grid(binners)
    return self._delay(delay, finish(*binners))
# Move methods marked with `__hidden__ = True` (set by the `_hidden` decorator)
# off the DataFrame class into DataFrame.__hidden__, so they do not show up in
# the public API / tab completion but remain reachable if needed.
DataFrame.__hidden__ = {}
hidden = [name for name, func in vars(DataFrame).items() if getattr(func, '__hidden__', False)]
for name in hidden:
    DataFrame.__hidden__[name] = getattr(DataFrame, name)
    delattr(DataFrame, name)
del hidden
class ColumnProxy(collections.abc.MutableMapping):
    """Mutable mapping view over the columns of a DataFrame's backing dataset.

    Mutations replace ``df._dataset`` with a new (immutable) dataset object.
    """
    def __init__(self, df):
        self.df = df

    @property
    def dataset(self):
        # always reflect the DataFrame's *current* dataset
        return self.df.dataset

    def __delitem__(self, item):
        assert item in self.dataset
        self.df._dataset = self.dataset.dropped(item)

    def __len__(self):
        return len(self.dataset)

    def __setitem__(self, item, value):
        base = self.dataset
        if item in base:
            # replacing an existing column: drop the old one first
            base = base.dropped(item)
        addition = vaex.dataset.DatasetArrays({item: value})
        self.df._dataset = base.merged(addition)
        self.df._length = len(value)
        if self.df._length_unfiltered is None:
            # first column ever: initialize the DataFrame's length bookkeeping
            self.df._length_unfiltered = self.df._length
            self.df._length_original = self.df._length
            self.df._index_end = self.df._length_unfiltered

    def __iter__(self):
        return iter(self.dataset)

    def __getitem__(self, item):
        return self.dataset[item]
class DataFrameLocal(DataFrame):
"""Base class for DataFrames that work with local file/data"""
def __init__(self, dataset=None):
    # default to an empty in-memory dataset
    if dataset is None:
        dataset = vaex.dataset.DatasetArrays()
    super(DataFrameLocal, self).__init__(dataset.keys())
    self._dataset = dataset
    # carry over optional metadata when the dataset provides it
    if hasattr(dataset, 'units'):
        self.units.update(dataset.units)
    if hasattr(dataset, 'ucds'):
        self.ucds.update(dataset.ucds)
    self.column_names = list(self.dataset)
    for column_name in self.column_names:
        self._initialize_column(column_name)
    if len(self.dataset):
        # derive the row count from the first column's array
        ar = self.dataset[list(self.dataset.keys())[0]]
        self._length = len(ar)
        if self._length_unfiltered is None:
            self._length_unfiltered = self._length
            self._length_original = self._length
            self._index_end = self._length_unfiltered
    # self.path = dataset.path
    self.mask = None
    self.columns = ColumnProxy(self)
@property
def dataset(self):
    # The backing dataset object that holds the actual column data.
    return self._dataset
@dataset.setter
def dataset(self, dataset):
    # Replacing the dataset resets all row-count bookkeeping and any cached
    # selection state, since the old masks no longer apply.
    self._dataset = dataset
    self._length_original = dataset.row_count
    self._length_unfiltered = self._length_original
    self._cached_filtered_length = None
    self._index_start = 0
    self._index_end = self._length_original
    self._invalidate_selection_cache()
def hashed(self) -> DataFrame:
    '''Return a DataFrame with a hashed dataset'''
    clone = self.copy()
    clone.dataset = clone.dataset.hashed()
    return clone
def _readonly(self, inplace=False):
    # Make the numpy-backed columns read-only if possible, so shared buffers
    # cannot be mutated through this DataFrame.
    df = self if inplace else self.copy()
    for key, ar in self.columns.items():
        if isinstance(ar, np.ndarray):
            df.columns[key] = ar = ar.view()  # make new object so we don't modify others
            ar.flags['WRITEABLE'] = False
    return df
@docsubst
def categorize(self, column, min_value=0, max_value=None, labels=None, inplace=False):
    """Mark column as categorical.

    This may help speed up calculations using integer columns between a range of [min_value, max_value].

    If max_value is not given, the [min_value and max_value] are calcuated from the data.

    Example:

    >>> import vaex
    >>> df = vaex.from_arrays(year=[2012, 2015, 2019], weekday=[0, 4, 6])
    >>> df.categorize('year', min_value=2012, max_value=2019)
    >>> df.categorize('weekday', labels=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])

    :param column: column to assume is categorical.
    :param min_value: minimum integer value (if max_value is not given, this is calculated)
    :param max_value: maximum integer value (if max_value is not given, this is calculated)
    :param labels: labels to associate to each value, list(range(min_value, max_value+1)) by default
    :param inplace: {inplace}
    """
    df = self if inplace else self.copy()
    column = _ensure_string_from_expression(column)
    if max_value is not None:
        # explicit range given: labels are the integer range itself
        labels = list(range(min_value, max_value+1))
        N = len(labels)
    else:
        # derive the range from the data
        vmin, vmax = df.minmax(column)
        if labels is None:
            N = int(vmax + 1)
            labels = list(range(vmin, vmax+1))
            min_value = vmin
        else:
            min_value = vmin
            if (vmax - vmin) >= len(labels):
                raise ValueError('value of {} found, which is larger than number of labels {}'.format(vmax, len(labels)))
    df._categories[column] = dict(labels=labels, N=len(labels), min_value=min_value)
    return df
def ordinal_encode(self, column, values=None, inplace=False):
    """Encode column as ordinal values and mark it as categorical.

    The existing column is renamed to a hidden column and replaced by a numerical columns
    with values between [0, len(values)-1].

    :param column: column (or expression) to encode
    :param values: explicit list of allowed values; values present in the column
        but not in this list are encoded as masked (missing)
    :param inplace: when True, modify this DataFrame instead of a copy
    :rtype: DataFrame
    """
    column = _ensure_string_from_expression(column)
    df = self if inplace else self.copy()
    # for the codes, we need to work on the unfiltered dataset, since the filter
    # may change, and we also cannot add an array that is smaller in length
    df_unfiltered = df.copy()
    # maybe we need some filter manipulation methods
    df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME)
    df_unfiltered._length_unfiltered = df._length_original
    df_unfiltered.set_active_range(0, df._length_original)
    # codes point to the index of found_values
    # meaning: found_values[codes[0]] == ds[column].values[0]
    found_values, codes = df_unfiltered.unique(column, return_inverse=True)
    if isinstance(found_values, array_types.supported_arrow_array_types):
        # elements of arrow arrays are not in arrow arrays, e.g. ar[0] in ar is False
        # see tests/arrow/assumptions_test.py::test_in_pylist
        found_values = found_values.to_pylist()
    if values is None:
        values = found_values
    else:
        # we have specified which values we should support, anything
        # not found will be masked
        translation = np.zeros(len(found_values), dtype=np.uint64)
        # mark values that are in the column, but not in values with a special value
        missing_value = len(found_values)
        for i, found_value in enumerate(found_values):
            try:
                found_value = found_value.decode('ascii')
            except Exception:  # was a bare `except:`; best-effort decode of byte strings
                pass
            if found_value not in values:  # not present, we need a missing value
                translation[i] = missing_value
            else:
                translation[i] = values.index(found_value)
        codes = translation[codes]
        if missing_value in translation:
            # all special values will be marked as missing
            codes = np.ma.masked_array(codes, codes==missing_value)
    # keep the original data under a hidden name; the rename return value was
    # previously bound to an unused local, which has been removed
    df.rename(column, '__original_' + column, unique=True)
    df.add_column(column, codes)
    df._categories[column] = dict(labels=values, N=len(values), min_value=0)
    return df
# Backward-compatibility alias: the old name `label_encode` forwards to
# ordinal_encode, wrapped as deprecated and hidden from the public API listing.
# NOTE(review): the deprecation message says 'use is_category' — it looks like it
# should recommend `ordinal_encode`; confirm before changing the runtime string.
label_encode = _hidden(vaex.utils.deprecated('use is_category')(ordinal_encode))
@property
def data(self):
    """Gives direct access to the data as numpy arrays.

    Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion.
    Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use
    DataFrame.evaluate(...).

    Columns can be accessed by their names, which are attributes. The attributes are of type numpy.ndarray.

    Example:

    >>> df = vaex.example()
    >>> r = np.sqrt(df.data.x**2 + df.data.y**2)
    """
    class Datas(object):
        pass
    # expose every real column as an attribute on a throwaway holder object
    holder = Datas()
    for name, array in self.columns.items():
        setattr(holder, name, array)
    return holder
    def copy(self, column_names=None, virtual=True):
        """Return a copy of this DataFrame: shared column data, copied state.

        Lengths, active range, renamed columns, units, variables, functions,
        category metadata and selections/filters are all carried over; the
        underlying arrays are shared, not duplicated.

        :param column_names: when given, copy only these columns (plus any
            hidden columns they or the filter depend on); None copies all
            columns via a fast path.
        :param virtual: when False, do not re-add virtual columns.
        """
        df = DataFrameLocal()
        df._length_unfiltered = self._length_unfiltered
        df._length_original = self._length_original
        df._cached_filtered_length = self._cached_filtered_length
        df._index_end = self._index_end
        df._index_start = self._index_start
        df._active_fraction = self._active_fraction
        df._renamed_columns = list(self._renamed_columns)
        df.units.update(self.units)
        df.variables.update(self.variables)  # we add all, could maybe only copy used
        df._categories.update(self._categories)
        copy_all = column_names is None
        all_column_names = self.get_column_names(hidden=True)
        if column_names is None:
            column_names = all_column_names.copy()
        # put in the selections (thus filters) in place
        # so drop moves instead of really dropping it
        df.functions.update(self.functions)
        for key, value in self.selection_histories.items():
            # TODO: selection_histories begin a defaultdict always gives
            # us the filtered selection, so check if we really have a
            # selection
            if self.get_selection(key):
                df.selection_histories[key] = list(value)
                # the filter should never be modified, so we can share a reference
                # except when we add filter on filter using
                # df = df[df.x>0]
                # df = df[df.x < 10]
                # in that case we make a copy in __getitem__
                if key == FILTER_SELECTION_NAME:
                    df._selection_masks[key] = self._selection_masks[key]
                else:
                    df._selection_masks[key] = vaex.superutils.Mask(df._length_original)
                # and make sure the mask is consistent with the cache chunks
                np.asarray(df._selection_masks[key])[:] = np.asarray(self._selection_masks[key])
        for key, value in self.selection_history_indices.items():
            if self.get_selection(key):
                df.selection_history_indices[key] = value
                # we can also copy the caches, which prevents recomputations of selections
                df._selection_mask_caches[key] = collections.defaultdict(dict)
                df._selection_mask_caches[key].update(self._selection_mask_caches[key])
        if copy_all:  # fast path
            # this is ok, we don't have to reset caches and all, since we do this in copy
            df._dataset = self.dataset
            df.column_names = list(self.column_names)
            df.virtual_columns = self.virtual_columns.copy()
            for name in all_column_names:
                df._initialize_column(name)
            for name, expression in df.virtual_columns.items():
                df._virtual_expressions[name] = Expression(df, expression)
        else:
            # selective copy: add requested columns, then pull in (hidden)
            # dependencies of virtual columns and of the filter
            depending = set()
            added = set()
            for name in column_names:
                added.add(name)
                if name in self.columns:
                    column = self.columns[name]
                    df.add_column(name, column)
                    if isinstance(column, ColumnSparse):
                        df._sparse_matrices[name] = self._sparse_matrices[name]
                elif name in self.virtual_columns:
                    if virtual:  # TODO: check if the ast is cached
                        df.add_virtual_column(name, self.virtual_columns[name])
                        deps = [key for key, value in df._virtual_expressions[name].ast_names.items()]
                        deps += [key for key, value in df._virtual_expressions[name]._ast_slices.items()]
                        depending.update(deps)
                else:
                    # this might be an expression, create a valid name
                    valid_name = vaex.utils.find_valid_name(name)
                    self.validate_expression(name)
                    # add the expression
                    df[valid_name] = df._expr(name)
                    # then get the dependencies
                    deps = [key for key, value in df._virtual_expressions[valid_name].ast_names.items()]
                    # NOTE(review): this indexes `_virtual_expressions[name]` while the line
                    # above uses `valid_name` — when name != valid_name this likely KeyErrors;
                    # probably both should use `valid_name`. Verify before changing.
                    deps += [key for key, value in df._virtual_expressions[name]._ast_slices.items()]
                    depending.update(deps)
            if self.filtered:
                selection = self.get_selection(FILTER_SELECTION_NAME)
                depending |= selection._depending_columns(self)
            depending.difference_update(added)  # remove already added
            hide = []
            # transitively walk the dependency graph; everything pulled in this
            # way was not explicitly requested, so it gets hidden afterwards
            while depending:
                new_depending = set()
                for name in depending:
                    added.add(name)
                    if name in self.columns:
                        df.add_column(name, self.columns[name])
                        hide.append(name)
                    elif name in self.virtual_columns:
                        if virtual:  # TODO: check if the ast is cached
                            df.add_virtual_column(name, self.virtual_columns[name])
                            deps = [key for key, value in self._virtual_expressions[name].ast_names.items()]
                            deps += [key for key, value in df._virtual_expressions[name]._ast_slices.items()]
                            new_depending.update(deps)
                            hide.append(name)
                    elif name in self.variables:
                        # if must be a variables?
                        # TODO: what if the variable depends on other variables
                        # we already add all variables
                        pass
                new_depending.difference_update(added)
                depending = new_depending
            for name in hide:
                df._hide_column(name)
        df.copy_metadata(self)
        return df
def shallow_copy(self, virtual=True, variables=True):
"""Creates a (shallow) copy of the DataFrame.
It will link to the same data, but will have its own state, e.g. virtual columns, variables, selection etc.
"""
df = DataFrameLocal(self.name, self.path, self.column_names)
df.columns.update(self.columns)
df._length_unfiltered = self._length_unfiltered
df._length_original = self._length_original
df._index_end = self._index_end
df._index_start = self._index_start
df._active_fraction = self._active_fraction
if virtual:
df.virtual_columns.update(self.virtual_columns)
if variables:
df.variables.update(self.variables)
# half shallow/deep copy
# for key, value in self.selection_histories.items():
# df.selection_histories[key] = list(value)
# for key, value in self.selection_history_indices.items():
# df.selection_history_indices[key] = value
return df
def is_local(self):
"""The local implementation of :func:`DataFrame.evaluate`, always returns True."""
return True
def length(self, selection=False):
"""Get the length of the DataFrames, for the selection of the whole DataFrame.
If selection is False, it returns len(df).
TODO: Implement this in DataFrameRemote, and move the method up in :func:`DataFrame.length`
:param selection: When True, will return the number of selected rows
:return:
"""
if selection:
return 0 if self.mask is None else np.sum(self.mask)
else:
return len(self)
@_hidden
def __call__(self, *expressions, **kwargs):
"""The local implementation of :func:`DataFrame.__call__`"""
import vaex.legacy
return vaex.legacy.SubspaceLocal(self, expressions, kwargs.get("executor") or self.executor, delay=kwargs.get("delay", False))
def echo(self, arg): return arg
@property
def _dtype(self):
dtypes = [self[k].dtype for k in self.get_column_names()]
if not all([dtypes[0] == dtype for dtype in dtypes]):
return ValueError("Not all dtypes are equal: %r" % dtypes)
return dtypes[0]
@property
def shape(self):
return (len(self), len(self.get_column_names()))
def __array__(self, dtype=None, parallel=True):
"""Gives a full memory copy of the DataFrame into a 2d numpy array of shape (n_rows, n_columns).
Note that the memory order is fortran, so all values of 1 column are contiguous in memory for performance reasons.
Note this returns the same result as:
>>> np.array(ds)
If any of the columns contain masked arrays, the masks are ignored (i.e. the masked elements are returned as well).
"""
if dtype is None:
dtype = np.float64
chunks = []
column_names = self.get_column_names(strings=False)
for name in column_names:
column_type = self.data_type(name, array_type='numpy')
if not np.can_cast(column_type, dtype):
if column_type != dtype:
raise ValueError("Cannot cast %r (of type %r) to %r" % (name, self.data_type(name), dtype))
chunks = self.evaluate(column_names, parallel=parallel, array_type='numpy')
if any(np.ma.isMaskedArray(chunk) for chunk in chunks):
return np.ma.array(chunks, dtype=dtype).T
else:
return np.array(chunks, dtype=dtype).T
def as_arrow(self):
"""Lazily cast all columns to arrow, except object types."""
df = self.copy()
for name in self.get_column_names():
df[name] = df[name].as_arrow()
return df
def as_numpy(self, strict=False):
"""Lazily cast all numerical columns to numpy.
If strict is True, it will also cast non-numerical types.
"""
df = self.copy()
for name in self.get_column_names():
df[name] = df[name].as_numpy(strict=strict)
return df
@vaex.utils.deprecated('use DataFrame.join(other)')
def _hstack(self, other, prefix=None):
"""Join the columns of the other DataFrame to this one, assuming the ordering is the same"""
assert len(self) == len(other), "does not make sense to horizontally stack DataFrames with different lengths"
for name in other.get_column_names():
if prefix:
new_name = prefix + name
else:
new_name = name
self.add_column(new_name, other.columns[name])
    def concat(self, *others):
        """Concatenates multiple DataFrames, adding the rows of the other DataFrame to the current, returned in a new DataFrame.

        No copy of the data is made.

        :param others: The other DataFrames that are concatenated with this DataFrame
        :return: New DataFrame with the rows concatenated
        :rtype: DataFrameLocal
        """
        # to reduce complexity, we 'extract' the dataframes (i.e. remove filter)
        dfs = [self, *others]
        dfs = [df.extract() for df in dfs]
        first, *tail = dfs
        common = []
        df_column_names = [df.get_column_names(virtual=False, hidden=True) for df in dfs]  # for performance
        df_all_column_names = [df.get_column_names(virtual=True, hidden=True) for df in dfs]  # for performance
        # a column that is real in `first` but virtual in a later df must be
        # materialized there, so the Dataset-level concat sees real columns everywhere
        for name in df_column_names[0]:
            for df, column_names in zip(tail, df_column_names[1:]):
                if name not in column_names:
                    # NOTE(review): `df_all_column_names` is a list of lists, so testing a
                    # column *name* for membership here looks like it is always False;
                    # likely intended `name in df_all_column_names[dfs.index(df)]` — verify.
                    if name in df_all_column_names:  # it's a virtual column, while in first a real column
                        # upgrade to a column, so Dataset's concat works
                        dfs[dfs.index(df)] = df._lazy_materialize(name)
                    else:
                        pass  # TODO: add columns with empty/null values
        # concatenate all datasets
        dataset = first.dataset.concat(*[df.dataset for df in tail])
        df_concat = vaex.dataframe.DataFrameLocal(dataset)
        # virtual columns with identical expressions everywhere stay virtual;
        # otherwise they are concatenated lazily as a column
        for name in list(first.virtual_columns.keys()):
            if all([first.virtual_columns[name] == df.virtual_columns.get(name, None) for df in tail]):
                df_concat.add_virtual_column(name, first.virtual_columns[name])
            else:
                df_concat.columns[name] = ColumnConcatenatedLazy([df[name] for df in dfs])
                df_concat.column_names.append(name)
                df_concat._save_assign_expression(name)
        # first writer wins: variables already set are not overwritten
        for df in dfs:
            for name, value in list(df.variables.items()):
                if name not in df_concat.variables:
                    df_concat.set_variable(name, value, write=False)
        return df_concat
    def _invalidate_caches(self):
        """Drop all cached state that depends on the filter: selection caches and the cached filtered row count."""
        self._invalidate_selection_cache()
        self._cached_filtered_length = None
def _invalidate_selection_cache(self):
self._selection_mask_caches.clear()
for key in self._selection_masks.keys():
self._selection_masks[key] = vaex.superutils.Mask(self._length_unfiltered)
    def _filtered_range_to_unfiltered_indices(self, i1, i2):
        """Map the filtered (logical) row range [i1, i2) to row indices into the unfiltered data.

        Walks the cached filter-mask blocks in ascending order, keeping two
        cursors: one into the unfiltered data and one counting rows that pass
        the filter, collecting unfiltered indices once the filtered cursor
        enters the requested range.

        :param int i1: start of the logical (filtered) range, inclusive
        :param int i2: end of the logical (filtered) range, exclusive
        :return: int64 numpy array of unfiltered row indices
        """
        assert self.filtered
        count = self.count()  # force the cache to be filled
        assert i2 <= count
        cache = self._selection_mask_caches[FILTER_SELECTION_NAME]
        # cache maps (start, end) -> (selection, mask block); sort blocks by start
        mask_blocks = iter(sorted(
            [(k1, k2, block) for (k1, k2), (selection, block) in cache.items()],
            key=lambda item: item[0]))
        done = False
        offset_unfiltered = 0  # points to the unfiltered arrays
        offset_filtered = 0  # points to the filtered array
        indices = []
        while not done:
            unfiltered_i1, unfiltered_i2, block = next(mask_blocks)
            count = block.sum()
            if (offset_filtered + count) < i1:  # i1 does not start in this block
                # skip the whole block in one step
                assert unfiltered_i2 == offset_unfiltered + len(block)
                offset_unfiltered = unfiltered_i2
                offset_filtered += count
            else:
                # scan element-wise: the filtered cursor only advances on rows
                # that pass the filter, the unfiltered cursor on every row
                for block_index in range(len(block)):
                    if block[block_index]:  # if not filtered, we go to the next index
                        if i1 <= offset_filtered < i2:  # if this is in the range we want...
                            indices.append(offset_unfiltered)
                        offset_filtered += 1
                    offset_unfiltered += 1
            done = offset_filtered >= i2
        return np.array(indices, dtype=np.int64)
def _evaluate(self, expression, i1, i2, out=None, selection=None, internal=False, filter_mask=None):
scope = scopes._BlockScope(self, i1, i2, mask=filter_mask, **self.variables)
if out is not None:
scope.buffers[expression] = out
value = scope.evaluate(expression)
if isinstance(value, ColumnString) and not internal:
value = value.to_numpy()
return value
def _unfiltered_chunk_slices(self, chunk_size):
logical_length = len(self)
if self.filtered:
full_mask = self._selection_masks[FILTER_SELECTION_NAME]
# TODO: python 3, use yield from
for item in vaex.utils.subdivide_mask(full_mask, max_length=chunk_size, logical_length=logical_length):
yield item
else:
for i1, i2 in vaex.utils.subdivide(logical_length, max_length=chunk_size):
yield i1, i2, i1, i2
    def _evaluate_implementation(self, expression, i1=None, i2=None, out=None, selection=None, filtered=True, array_type=None, parallel=True, chunk_size=None, raw=False):
        """The real implementation of :func:`DataFrame.evaluate` (not returning a generator).

        Two code paths: a parallel path that uses map_reduce over chunks
        (pre-allocating numpy arrays where possible and stitching arrow/filtered
        chunks back together), and a simpler serial path using a block scope.

        :param raw: Whether indices i1 and i2 refer to unfiltered (raw=True) or 'logical' offsets (raw=False)
        """
        # normalize input: accept a single expression or a list of them
        was_list, [expressions] = vaex.utils.listify(expression)
        expressions = vaex.utils._ensure_strings_from_expressions(expressions)
        column_names = self.get_column_names(hidden=True)
        expressions = [vaex.utils.valid_expression(column_names, k) for k in expressions]
        selection = _ensure_strings_from_expressions(selection)
        max_stop = (len(self) if (self.filtered and filtered) else self.length_unfiltered())
        i1 = i1 or 0
        i2 = i2 or max_stop
        if parallel:
            df = self
            # first, reduce complexity for the parallel path
            if self.filtered and not filtered:
                df = df.drop_filter()
            if i1 != 0 or i2 != max_stop:
                if not raw and self.filtered and filtered:
                    count_check = len(self)  # fill caches and masks
                    mask = self._selection_masks[FILTER_SELECTION_NAME]
                    # translate logical range to unfiltered indices (inclusive end)
                    i1, i2 = mask.indices(i1, i2-1)
                    assert i1 != -1
                    i2 += 1
                # TODO: performance: can we collapse the two trims in one?
                df = df.trim()
                df.set_active_range(i1, i2)
                df = df.trim()
        else:
            df = self
        expression = expressions[0]
        # here things are simpler or we don't go parallel
        mask = None
        if parallel:
            use_filter = df.filtered and filtered
            length = df.length_unfiltered()
            arrays = {}
            # maps to a dict of start_index -> apache arrow array (a chunk)
            chunks_map = {}
            dtypes = {}
            shapes = {}
            virtual = set()
            # TODO: For NEP branch: dtype -> dtype_evaluate
            expression_to_evaluate = list(set(expressions))  # lets assume we have to do them all
            for expression in set(expressions):
                dtypes[expression] = dtype = df.data_type(expression)
                if expression not in df.columns:
                    virtual.add(expression)
                # since we will use pre_filter=True, we'll get chunks of the data at unknown offset
                # so we'll also have to stitch those back together
                if use_filter or selection:# or not isinstance(dtype, np.dtype):
                    chunks_map[expression] = {}
                else:
                    # we know exactly where to place the chunks, so we pre allocate the arrays
                    if expression in virtual:
                        if isinstance(dtype, np.dtype):
                            shape = (length, ) + df._shape_of(expression, filtered=False)[1:]
                            shapes[expression] = shape
                            # numpy arrays are fixed length, so we can pre allocate them
                            if df.is_masked(expression):
                                arrays[expression] = np.ma.empty(shapes.get(expression, length), dtype=dtypes[expression])
                            else:
                                arrays[expression] = np.zeros(shapes.get(expression, length), dtype=dtypes[expression])
                        else:
                            # TODO: find a way to modify an arrow array inplace, e.g. float64 array
                            # probably by making an ndarray, and have an Arrow array view that
                            # fixed_width = False
                            # try:
                            #     ts.bit_width
                            #     fixed_width = True
                            # except ValueError:
                            #     pass
                            # if fixed_width:
                            chunks_map[expression] = {}
                    else:
                        # quick path, we can just copy the column
                        arrays[expression] = df.columns[expression]
                        start, end = df._index_start, df._index_end
                        if start != 0 or end != len(arrays[expression]):
                            arrays[expression] = arrays[expression][start:end]
                        if isinstance(arrays[expression], vaex.column.Column):
                            arrays[expression] = arrays[expression][0:end-start]  # materialize fancy columns (lazy, indexed)
                        expression_to_evaluate.remove(expression)
            def assign(thread_index, i1, i2, *blocks):
                # runs per chunk (possibly from worker threads); writes results
                # either into the pre-allocated arrays or into chunks_map
                for i, expr in enumerate(expression_to_evaluate):
                    if expr in chunks_map:
                        # for non-primitive arrays we simply keep a reference to the chunk
                        chunks_map[expr][i1] = blocks[i]
                    else:
                        # for primitive arrays (and no filter/selection) we directly add it to the right place in contiguous numpy array
                        arrays[expr][i1:i2] = blocks[i]
            if expression_to_evaluate:
                df.map_reduce(assign, lambda *_: None, expression_to_evaluate, ignore_filter=False, selection=selection, pre_filter=use_filter, info=True, to_numpy=False)
            def finalize_result(expression):
                # stitch chunks back together (or convert the pre-allocated array)
                if expression in chunks_map:
                    # put all chunks in order
                    chunks = [chunk for (i1, chunk) in sorted(chunks_map[expression].items(), key=lambda i1_and_chunk: i1_and_chunk[0])]
                    assert len(chunks) > 0
                    if len(chunks) == 1:
                        values = array_types.convert(chunks[0], array_type)
                    else:
                        values = array_types.convert(chunks, array_type)
                    return values
                else:
                    return array_types.convert(arrays[expression], array_type)
            result = [finalize_result(k) for k in expressions]
            if not was_list:
                result = result[0]
            return result
        else:
            # serial path: evaluate each expression in a block scope
            if not raw and self.filtered and filtered:
                count_check = len(self)  # fill caches and masks
                mask = self._selection_masks[FILTER_SELECTION_NAME]
                if _DEBUG:
                    if i1 == 0 and i2 == count_check:
                        # we cannot check it if we just evaluate a portion
                        assert not mask.is_dirty()
                        # assert mask.count() == count_check
                i1, i2 = mask.indices(i1, i2-1)  # -1 since it is inclusive
                assert i1 != -1
                assert i2 != -1
                i2 = i2+1  # +1 to make it inclusive
            values = []
            for expression in expressions:
                # for both a selection or filtering we have a mask
                if selection not in [None, False] or (self.filtered and filtered):
                    mask = self.evaluate_selection_mask(selection, i1, i2)
                scope = scopes._BlockScope(self, i1, i2, mask=mask, **self.variables)
                if out is not None:
                    scope.buffers[expression] = out
                value = scope.evaluate(expression)
                value = array_types.convert(value, array_type)
                values.append(value)
            if not was_list:
                return values[0]
            return values
def _equals(self, other):
values = self.compare(other)
return values == ([], [], [], [])
def compare(self, other, report_missing=True, report_difference=False, show=10, orderby=None, column_names=None):
"""Compare two DataFrames and report their difference, use with care for large DataFrames"""
if column_names is None:
column_names = self.get_column_names(virtual=False)
for other_column_name in other.get_column_names(virtual=False):
if other_column_name not in column_names:
column_names.append(other_column_name)
different_values = []
missing = []
type_mismatch = []
meta_mismatch = []
assert len(self) == len(other)
if orderby:
index1 = np.argsort(self.columns[orderby])
index2 = np.argsort(other.columns[orderby])
for column_name in column_names:
if column_name not in self.get_column_names(virtual=False):
missing.append(column_name)
if report_missing:
print("%s missing from this DataFrame" % column_name)
elif column_name not in other.get_column_names(virtual=False):
missing.append(column_name)
if report_missing:
print("%s missing from other DataFrame" % column_name)
else:
ucd1 = self.ucds.get(column_name)
ucd2 = other.ucds.get(column_name)
if ucd1 != ucd2:
print("ucd mismatch : %r vs %r for %s" % (ucd1, ucd2, column_name))
meta_mismatch.append(column_name)
unit1 = self.units.get(column_name)
unit2 = other.units.get(column_name)
if unit1 != unit2:
print("unit mismatch : %r vs %r for %s" % (unit1, unit2, column_name))
meta_mismatch.append(column_name)
type1 = self.data_type(column_name)
type2 = other.data_type(column_name)
if not vaex.array_types.same_type(type1, type2):
print("different data types: %s vs %s for %s" % (self.data_type(column_name), other.data_type(column_name), column_name))
type_mismatch.append(column_name)
else:
# a = self.columns[column_name]
# b = other.columns[column_name]
# if self.filtered:
# a = a[self.evaluate_selection_mask(None)]
# if other.filtered:
# b = b[other.evaluate_selection_mask(None)]
a = self.evaluate(column_name, array_type="numpy")
b = other.evaluate(column_name, array_type="numpy")
if orderby:
a = a[index1]
b = b[index2]
def normalize(ar):
if isinstance(ar, pa.Array):
ar = ar.to_pandas().values
# if ar.dtype == str_type:
# return ar
if ar.dtype.kind == "f" and hasattr(ar, "mask"):
mask = ar.mask
ar = ar.copy()
ar[mask] = np.nan
if ar.dtype.kind in "SU":
if hasattr(ar, "mask"):
data = ar.data
else:
data = ar
values = [value.strip() for value in data.tolist()]
if hasattr(ar, "mask"):
ar = np.ma.masked_array(values, ar.mask)
else:
ar = np.array(values)
return ar
def equal_mask(a, b):
a = normalize(a)
b = normalize(b)
boolean_mask = (a == b)
if not self.is_string(column_name) and self.data_type(column_name).kind == 'f': # floats with nan won't equal itself, i.e. NaN != NaN
boolean_mask |= (np.isnan(a) & np.isnan(b))
return boolean_mask
boolean_mask = equal_mask(a, b)
all_equal = np.all(boolean_mask)
if not all_equal:
count = np.sum(~boolean_mask)
print("%s does not match for both DataFrames, %d rows are diffent out of %d" % (column_name, count, len(self)))
different_values.append(column_name)
if report_difference:
indices = np.arange(len(self))[~boolean_mask]
values1 = self.columns[column_name][:][~boolean_mask]
values2 = other.columns[column_name][:][~boolean_mask]
print("\tshowing difference for the first 10")
for i in range(min(len(values1), show)):
try:
diff = values1[i] - values2[i]
except:
diff = "does not exists"
print("%s[%d] == %s != %s other.%s[%d] (diff = %s)" % (column_name, indices[i], values1[i], values2[i], column_name, indices[i], diff))
return different_values, missing, type_mismatch, meta_mismatch
    @docsubst
    def join(self, other, on=None, left_on=None, right_on=None, lprefix='', rprefix='', lsuffix='', rsuffix='', how='left', allow_duplication=False, inplace=False):
        """Return a DataFrame joined with other DataFrames, matched by columns/expression on/left_on/right_on

        If neither on/left_on/right_on is given, the join is done by simply adding the columns (i.e. on the implicit
        row index).

        Note: The filters will be ignored when joining, the full DataFrame will be joined (since filters may
        change). If either DataFrame is heavily filtered (contains just a small number of rows) consider running
        :func:`DataFrame.extract` first.

        Example:

        >>> a = np.array(['a', 'b', 'c'])
        >>> x = np.arange(1,4)
        >>> ds1 = vaex.from_arrays(a=a, x=x)
        >>> b = np.array(['a', 'b', 'd'])
        >>> y = x**2
        >>> ds2 = vaex.from_arrays(b=b, y=y)
        >>> ds1.join(ds2, left_on='a', right_on='b')

        :param other: Other DataFrame to join with (the right side)
        :param on: default key for the left table (self)
        :param left_on: key for the left table (self), overrides on
        :param right_on: default key for the right table (other), overrides on
        :param lprefix: prefix to add to the left column names in case of a name collision
        :param rprefix: similar for the right
        :param lsuffix: suffix to add to the left column names in case of a name collision
        :param rsuffix: similar for the right
        :param how: how to join, 'left' keeps all rows on the left, and adds columns (with possible missing values)
                'right' is similar with self and other swapped. 'inner' will only return rows which overlap.
        :param bool allow_duplication: Allow duplication of rows when the joined column contains non-unique values.
        :param inplace: {inplace}
        :return:
        """
        # a 'right' join is implemented as a 'left' join with both sides (and
        # their prefixes/suffixes/keys) swapped
        inner = False
        left = self
        right = other
        if how == 'left':
            pass
        elif how == 'right':
            left, right = right, left
            lprefix, rprefix = rprefix, lprefix
            lsuffix, rsuffix = rsuffix, lsuffix
            left_on, right_on = right_on, left_on
        elif how == 'inner':
            inner = True
        else:
            raise ValueError('join type not supported: {}, only left and right'.format(how))
        left = left if inplace else left.copy()
        on = _ensure_string_from_expression(on)
        left_on = _ensure_string_from_expression(left_on)
        right_on = _ensure_string_from_expression(right_on)
        left_on = left_on or on
        right_on = right_on or on
        # early detection of unsolvable name collisions
        for name in right:
            if left_on and (rprefix + name + rsuffix == lprefix + left_on + lsuffix):
                continue  # it's ok when we join on the same column name
            if name in left and rprefix + name + rsuffix == lprefix + name + lsuffix:
                raise ValueError('column name collision: {} exists in both column, and no proper suffix given'
                                 .format(name))
        right = right.extract()  # get rid of filters and active_range
        assert left.length_unfiltered() == left.length_original()
        N = left.length_unfiltered()
        N_other = len(right)
        if left_on is None and right_on is None:
            lookup = None
        else:
            df = left
            # we index the right side, this assumes right is smaller in size
            index = right._index(right_on)
            dtype = left.data_type(left_on)
            duplicates_right = index.has_duplicates
            if duplicates_right and not allow_duplication:
                raise ValueError('This join will lead to duplication of rows which is disabled, pass allow_duplication=True')
            # our max value for the lookup table is the row index number, so if we join a small
            # df with say 100 rows, we can do it with a int8
            lookup_dtype = vaex.utils.required_dtype_for_max(len(right))
            # we put in the max value to maximize triggering failures in the case of a bug (we don't want
            # to point to row 0 in case we do, we'd rather crash)
            lookup = np.full(left._length_original, np.iinfo(lookup_dtype).max, dtype=lookup_dtype)
            nthreads = self.executor.thread_pool.nthreads
            lookup_masked = [False] * nthreads  # does the lookup contain masked/-1 values?
            lookup_extra_chunks = []
            from vaex.column import _to_string_sequence
            def map(thread_index, i1, i2, ar):
                # fill lookup[i1:i2] with the right-side row index for each left row
                if vaex.array_types.is_string_type(dtype):
                    previous_ar = ar
                    ar = _to_string_sequence(ar)
                if np.ma.isMaskedArray(ar):
                    mask = np.ma.getmaskarray(ar)
                    found_masked = index.map_index_masked(ar.data, mask, lookup[i1:i2])
                    lookup_masked[thread_index] = lookup_masked[thread_index] or found_masked
                    if duplicates_right:
                        extra = index.map_index_duplicates(ar.data, mask, i1)
                        lookup_extra_chunks.append(extra)
                else:
                    found_masked = index.map_index(ar, lookup[i1:i2])
                    lookup_masked[thread_index] = lookup_masked[thread_index] or found_masked
                    if duplicates_right:
                        extra = index.map_index_duplicates(ar, i1)
                        lookup_extra_chunks.append(extra)
            def reduce(a, b):
                pass
            left.map_reduce(map, reduce, [left_on], delay=False, name='fill looking', info=True, to_numpy=False, ignore_filter=True)
            if len(lookup_extra_chunks):
                # if the right has duplicates, we increase the left of left, and the lookup array
                lookup_left = np.concatenate([k[0] for k in lookup_extra_chunks])
                lookup_right = np.concatenate([k[1] for k in lookup_extra_chunks])
                left = left.concat(left.take(lookup_left))
                lookup = np.concatenate([lookup, lookup_right])
            if inner:
                left_mask_matched = lookup != -1  # all the places where we found a match to the right
                lookup = lookup[left_mask_matched]  # filter the lookup table to the right
                left_indices_matched = np.where(left_mask_matched)[0]  # convert mask to indices for the left
                # indices can still refer to filtered rows, so do not drop the filter
                left = left.take(left_indices_matched, filtered=False, dropfilter=False)
        direct_indices_map = {}  # for performance, keeps a cache of two levels of indirection of indices

        def mangle_name(prefix, name, suffix):
            # hidden columns keep their double-underscore marker in front of the prefix
            if name.startswith('__'):
                return '__' + prefix + name[2:] + suffix
            else:
                return prefix + name + suffix
        # first, do renaming, so all column names are unique
        right_names = right.get_names(hidden=True)
        left_names = left.get_names(hidden=True)
        for name in right_names:
            if name in left_names:
                # find a unique name across both dataframe, including the new name for the left
                all_names = list(set(right_names + left_names))
                all_names.append(mangle_name(lprefix, name, lsuffix))  # we dont want to steal the left's name
                all_names.remove(name)  # we could even claim the original name
                new_name = mangle_name(rprefix, name, rsuffix)
                # we will not add this column twice when it is the join column
                if new_name != left_on:
                    if new_name in all_names:  # it's still not unique
                        new_name = vaex.utils.find_valid_name(new_name, all_names)
                    right.rename(name, new_name)
                    right_names[right_names.index(name)] = new_name
                # and the same for the left
                all_names = list(set(right_names + left_names))
                all_names.remove(name)
                new_name = mangle_name(lprefix, name, lsuffix)
                if new_name in all_names:  # still not unique
                    new_name = vaex.utils.find_valid_name(new_name, all_names)
                    left.rename(name, new_name)
                    left_names[left_names.index(name)] = new_name
        # now we add columns from the right, to the left
        right_names = right.get_names(hidden=True)
        left_names = left.get_names(hidden=True)
        right_columns = []
        for name in right_names:
            column_name = name
            if name == left_on and name in left_names:
                continue  # skip when it's the join column
            assert name not in left_names
            if name in right.variables:
                left.set_variable(name, right.variables[name])
            elif column_name in right.virtual_columns:
                left.add_virtual_column(name, right.virtual_columns[column_name])
            else:
                right_columns.append(name)
                # we already add the column name here to get the same order
                left.column_names.append(name)
                left._initialize_column(name)
        # merge the two datasets
        right_dataset = right.dataset.project(*right_columns)
        if lookup is not None:
            # if lookup is None, we do a row based join
            # and we only need to merge.
            # if we have an array of lookup indices, we 'take' those
            right_dataset = right_dataset.take(lookup, masked=any(lookup_masked))
        # row number etc should not have changed, we only append new columns
        # so no need to reset caches
        left._dataset = left.dataset.merged(right_dataset)
        return left
def export(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
"""Exports the DataFrame to a file written with arrow
:param DataFrameLocal df: DataFrame to export
:param str path: path for file
:param lis[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param: bool virtual: When True, export virtual columns
:param str sort: expression used for sorting the output
:param bool ascending: sort ascending (True) or descending
:return:
"""
if path.endswith('.arrow'):
self.export_arrow(path)
elif path.endswith('.hdf5'):
self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.fits'):
self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.parquet'):
self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.csv'):
self.export_csv(path, selection=selection, progress=progress, virtual=virtual)
else:
raise ValueError('''Unrecognized file extension. Please use .arrow, .hdf5, .parquet, .fits, or .csv to export to the particular file format.''')
def export_arrow(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """Exports the DataFrame to a file written with arrow

    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue,
        or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
    :param str sort: expression used for sorting the output
    :param bool ascending: sort ascending (True) or descending
    :return:
    """
    # All the heavy lifting lives in the arrow-specific export module;
    # import lazily so pyarrow is only required when actually exporting.
    from vaex.arrow import export as _arrow_export
    _arrow_export.export(
        self, path, column_names, byteorder, shuffle, selection,
        progress=progress, virtual=virtual, sort=sort, ascending=ascending)
def export_arrow_stream(self, path_or_writer, progress=None, chunk_size=default_chunk_size):
    """Exports the DataFrame as Arrow stream

    :param path_or_writer path: path for file or :py:data:`pyarrow.RecordBatchStreamWriter`
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue,
        or a default progress bar when progress=True
    :param int chunk_size: number of rows written per record batch
    :return:
    """
    # Derive the stream schema from a one-row slice of the DataFrame.
    schema = self[0:1].to_arrow_table().schema
    progressbar = vaex.utils.progressbars(progress)

    def write(writer):
        # Stream the DataFrame out chunk by chunk, reporting progress as a
        # fraction of rows written.
        progressbar(0)
        N = len(self)
        for i1, i2, table in self.to_arrow_table(chunk_size=chunk_size):
            writer.write_table(table)
            progressbar(i2 / N)
        progressbar(1.)

    if isinstance(path_or_writer, str):
        with pa.OSFile(path_or_writer, 'wb') as sink:
            writer = pa.RecordBatchStreamWriter(sink, schema)
            try:
                write(writer)
            finally:
                # BUGFIX: the writer was previously never closed, so the
                # Arrow IPC end-of-stream marker was not written and readers
                # could treat the stream as truncated.
                writer.close()
    else:
        # Caller owns the writer; they are responsible for closing it.
        write(path_or_writer)
def export_parquet(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """Exports the DataFrame to a parquet file

    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue,
        or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
    :param str sort: expression used for sorting the output
    :param bool ascending: sort ascending (True) or descending
    :return:
    """
    # Parquet writing is implemented in the arrow subpackage; delegate.
    from vaex.arrow import export as _arrow_export
    _arrow_export.export_parquet(
        self, path, column_names, byteorder, shuffle, selection,
        progress=progress, virtual=virtual, sort=sort, ascending=ascending)
def export_hdf5(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """Exports the DataFrame to a vaex hdf5 file

    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue,
        or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
    :param str sort: expression used for sorting the output
    :param bool ascending: sort ascending (True) or descending
    :return:
    """
    # Delegate to the generic vaex export machinery (imported lazily).
    from vaex import export as _export
    _export.export_hdf5(
        self, path, column_names, byteorder, shuffle, selection,
        progress=progress, virtual=virtual, sort=sort, ascending=ascending)
def export_fits(self, path, column_names=None, shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):
    """Exports the DataFrame to a fits file that is compatible with TOPCAT colfits format

    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue,
        or a default progress bar when progress=True
    :param bool virtual: When True, export virtual columns
    :param str sort: expression used for sorting the output
    :param bool ascending: sort ascending (True) or descending
    :return:
    """
    # FITS writing is handled by the generic vaex export module.
    from vaex import export as _export
    _export.export_fits(
        self, path, column_names, shuffle, selection,
        progress=progress, virtual=virtual, sort=sort, ascending=ascending)
@docsubst
def export_csv(self, path, virtual=True, selection=False, progress=None, chunk_size=1_000_000, **kwargs):
    """ Exports the DataFrame to a CSV file.

    :param str path: Path for file
    :param bool virtual: If True, export virtual columns as well
    :param bool selection: {selection1}
    :param progress: {progress}
    :param int chunk_size: {chunk_size_export}
    :param **kwargs: Extra keyword arguments to be passed on pandas.DataFrame.to_csv()
    :return:
    """
    import pandas as pd
    expressions = self.get_column_names(virtual=virtual)
    progressbar = vaex.utils.progressbars(progress)
    n_samples = len(self)
    # NOTE: removed the unused `dtypes = self[expressions].dtypes` local —
    # it was computed but never referenced (dead work per export call).
    # Stream the data out chunk by chunk so arbitrarily large DataFrames
    # can be exported with bounded memory use.
    for i1, i2, chunks in self.evaluate_iterator(expressions, chunk_size=chunk_size, selection=selection):
        progressbar(i1 / n_samples)
        chunk_dict = {col: values for col, values in zip(expressions, chunks)}
        chunk_pdf = pd.DataFrame(chunk_dict)

        if i1 == 0:  # Only the 1st chunk should have a header and the rest will be appended
            mode = 'w'
            header = True
        else:
            mode = 'a'
            header = False

        chunk_pdf.to_csv(path_or_buf=path, mode=mode, header=header, index=False, **kwargs)
    progressbar(1.0)
def _needs_copy(self, column_name):
    """Whether materializing/exporting ``column_name`` requires copying its data.

    Returns False (no copy needed) when the column is either a plain,
    non-virtual, contiguous float64 numpy column, or a string-typed column
    (``str_type`` or numpy kind ``'S'``); everything else needs a copy.
    """
    import vaex.file.other
    # The parenthesized expression is True for "usable as-is": a real column
    # (not a Column/TapColumn wrapper, not virtual) whose dtype is float64
    # and whose stride of 8 bytes means it is contiguous — OR a string
    # column. The leading `not` turns that into "needs a copy".
    return not \
        ((column_name in self.column_names and not
          isinstance(self.columns[column_name], Column) and not
          isinstance(self.columns[column_name], vaex.file.other.DatasetTap.TapColumn) and
          self.columns[column_name].dtype.type == np.float64 and
          self.columns[column_name].strides[0] == 8 and
          column_name not in
          self.virtual_columns) or self.data_type(column_name) == str_type or self.data_type(column_name).kind == 'S')
    # and False:
def selected_length(self, selection="default"):
    """The local implementation of :func:`DataFrame.selected_length`"""
    # Count the rows matching the selection; .item() unwraps the scalar result.
    n_selected = self.count(selection=selection)
    return int(n_selected.item())
# np.sum(self.mask) if self.has_selection() else None
# def _set_mask(self, mask):
# self.mask = mask
# self._has_selection = mask is not None
# # self.signal_selection_changed.emit(self)
def groupby(self, by=None, agg=None):
    """Return a :class:`GroupBy` or :class:`DataFrame` object when agg is not None

    Examples:

    >>> import vaex
    >>> import numpy as np
    >>> np.random.seed(42)
    >>> x = np.random.randint(1, 5, 10)
    >>> y = x**2
    >>> df = vaex.from_arrays(x=x, y=y)
    >>> df.groupby(df.x, agg='count')
    #    x    y_count
    0    3    4
    1    4    2
    2    1    3
    3    2    1
    >>> df.groupby(df.x, agg=[vaex.agg.count('y'), vaex.agg.mean('y')])
    #    x    y_count    y_mean
    0    3    4          9
    1    4    2          16
    2    1    3          1
    3    2    1          4
    >>> df.groupby(df.x, agg={'z': [vaex.agg.count('y'), vaex.agg.mean('y')]})
    #    x    z_count    z_mean
    0    3    4          9
    1    4    2          16
    2    1    3          1
    3    2    1          4

    Example using datetime:

    >>> import vaex
    >>> import numpy as np
    >>> t = np.arange('2015-01-01', '2015-02-01', dtype=np.datetime64)
    >>> y = np.arange(len(t))
    >>> df = vaex.from_arrays(t=t, y=y)
    >>> df.groupby(vaex.BinnerTime.per_week(df.t)).agg({'y' : 'sum'})
    #  t                      y
    0  2015-01-01 00:00:00   21
    1  2015-01-08 00:00:00   70
    2  2015-01-15 00:00:00  119
    3  2015-01-22 00:00:00  168
    4  2015-01-29 00:00:00   87

    :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary
        where the keys indicate the target column names, and the values the operations, or the a list of aggregates.
        When not given, it will return the groupby object.
    :return: :class:`DataFrame` or :class:`GroupBy` object.
    """
    from .groupby import GroupBy
    # Build the grouper first; with no aggregation requested, hand it back
    # so the caller can apply aggregations later.
    grouper = GroupBy(self, by=by)
    return grouper if agg is None else grouper.agg(agg)
def binby(self, by=None, agg=None):
    """Return a :class:`BinBy` or :class:`DataArray` object when agg is not None

    The binby operation does not return a 'flat' DataFrame, instead it returns an N-d grid
    in the form of an xarray.

    :param dict, list or agg agg: Aggregate operation in the form of a string, vaex.agg object, a dictionary
        where the keys indicate the target column names, and the values the operations, or the a list of aggregates.
        When not given, it will return the binby object.
    :return: :class:`DataArray` or :class:`BinBy` object.
    """
    from .groupby import BinBy
    # Same shape as groupby(): return the binner itself unless an
    # aggregation was requested, in which case evaluate it immediately.
    binner = BinBy(self, by=by)
    return binner.agg(agg) if agg is not None else binner
def _selection(self, create_selection, name, executor=None, execute_fully=False):
    # Wraps `create_selection` so that a reusable boolean Mask is allocated
    # (once per selection name) before delegating to the base implementation.
    def create_wrapper(current):
        selection = create_selection(current)
        # only create a mask when we have a selection, so we do not waste memory
        if selection is not None and name not in self._selection_masks:
            self._selection_masks[name] = vaex.superutils.Mask(self._length_unfiltered)
        return selection
    return super()._selection(create_wrapper, name, executor, execute_fully)
@property
def values(self):
    """Gives a full memory copy of the DataFrame into a 2d numpy array of shape (n_rows, n_columns).

    Note that the memory order is fortran, so all values of 1 column are contiguous in memory for performance reasons.

    Note this returns the same result as:

    >>> np.array(ds)

    If any of the columns contain masked arrays, the masks are ignored (i.e. the masked elements are returned as well).
    """
    # Delegate to the numpy protocol implementation on this class.
    as_2d_array = self.__array__()
    return as_2d_array
def _is_dtype_ok(dtype):
return dtype.type in [np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16,
np.uint32, np.uint64, np.float32, np.float64, np.datetime64] or\
dtype.type == np.string_ or dtype.type == np.unicode_
def _is_array_type_ok(array):
    """Return True when the array's dtype is one vaex can store natively."""
    return _is_dtype_ok(array.dtype)
|
maartenbreddels/vaex
|
packages/vaex-core/vaex/dataframe.py
|
Python
|
mit
| 282,160
|
[
"Gaussian"
] |
460ab1acb9ef710939ef541cb63a8682e9379ddd74877bb13348d22630eb9dc3
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
# Short aliases for the Cheetah NameMapper lookup helpers used throughout
# the generated respond() method below.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Metadata recorded by the Cheetah compiler when this module was generated
# from the .tmpl source listed in __CHEETAH_src__.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1453357629.459436
__CHEETAH_genTimestamp__ = 'Thu Jan 21 15:27:09 2016'
__CHEETAH_src__ = '/home/babel/Build/Test/OpenPLi5/openpli5.0/build/tmp/work/tmnanoseplus-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+186ea358f6-r0/git/plugin/controllers/views/mobile/movies.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Jan 21 15:27:08 2016'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run against a Cheetah runtime older than the compiler that
# produced this module.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class movies(Template):
    # NOTE: this class was autogenerated by the Cheetah template compiler
    # from mobile/movies.tmpl — edit the .tmpl source, not this module.

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        # Forward only the keyword arguments the Cheetah Template base class
        # understands; everything else is dropped.
        super(movies, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Renders the mobile "movies" page into the transaction's response
        # buffer; returns the rendered text when using a DummyTransaction.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
        _v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 17, col 49
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 17, col 49.
        write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t\t<h2>''')
        _v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 19, col 8
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 19, col 8.
        write(u'''</h2>\r
\t\t</div>\r
\r
\t\t<div data-role="fieldcontain">\r
\t\t <select name="select-choice-1" id="select-choice-moviedir" onChange="window.location.href=\'/mobile/movies?dirname=\'+escape(options[selectedIndex].value);">\r
\t\t\t <option value="''')
        _v = VFFSL(SL,"directory",True) # u'$directory' on line 24, col 21
        if _v is not None: write(_filter(_v, rawExpr=u'$directory')) # from line 24, col 21.
        write(u'''">''')
        _v = VFFSL(SL,"directory",True) # u'$directory' on line 24, col 33
        if _v is not None: write(_filter(_v, rawExpr=u'$directory')) # from line 24, col 33.
        write(u'''</option>\r
''')
        # One <option> per configured movie bookmark directory.
        for bookmark in VFFSL(SL,"bookmarks",True): # generated from line 25, col 6
            write(u'''\t\t\t <option value="''')
            _v = VFFSL(SL,"bookmark",True) # u'$bookmark' on line 26, col 21
            if _v is not None: write(_filter(_v, rawExpr=u'$bookmark')) # from line 26, col 21.
            write(u'''">''')
            _v = VFFSL(SL,"bookmark",True) # u'$bookmark' on line 26, col 32
            if _v is not None: write(_filter(_v, rawExpr=u'$bookmark')) # from line 26, col 32.
            write(u'''</option>\r
''')
        write(u'''\t\t </select>\r
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
        _v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 33, col 64
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 33, col 64.
        write(u'''</li>\r
''')
        # One <li> per movie: show the EPG event name when present,
        # otherwise fall back to the file name.
        for movie in VFFSL(SL,"movies",True): # generated from line 34, col 5
            if VFFSL(SL,"movie.eventname",True) != "": # generated from line 35, col 5
                write(u'''\t\t\t\t<li>''')
                _v = VFFSL(SL,"movie.eventname",True) # u'$movie.eventname' on line 36, col 9
                if _v is not None: write(_filter(_v, rawExpr=u'$movie.eventname')) # from line 36, col 9.
                write(u'''</li>\r
''')
            else: # generated from line 37, col 5
                write(u'''\t\t\t\t<li>''')
                _v = VFFSL(SL,"movie.filename",True) # u'$movie.filename' on line 38, col 9
                if _v is not None: write(_filter(_v, rawExpr=u'$movie.filename')) # from line 38, col 9.
                write(u'''</li>\r
''')
        write(u'''\t\t\t</ul>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
        _v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 46, col 86
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 46, col 86.
        write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    # Bookkeeping attributes expected by the Cheetah Template base class.
    _CHEETAH__instanceInitialized = False

    _CHEETAH_version = __CHEETAH_version__

    _CHEETAH_versionTuple = __CHEETAH_versionTuple__

    _CHEETAH_genTime = __CHEETAH_genTime__

    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__

    _CHEETAH_src = __CHEETAH_src__

    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__

    # Name of the entry-point method the Cheetah runtime should invoke.
    _mainCheetahMethod_for_movies= 'respond'

## END CLASS DEFINITION
# Attach the standard Cheetah plumbing methods to the class if the runtime
# has not already done so (keeps this generated module importable on its own).
if not hasattr(movies, '_initCheetahAttributes'):
    templateAPIClass = getattr(movies, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(movies)


# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=movies()).run()
|
MOA-2011/e2openplugin-OpenWebif
|
plugin/controllers/views/mobile/movies.py
|
Python
|
gpl-2.0
| 8,429
|
[
"VisIt"
] |
86257ac1e02a8c8ccf7d705a59f704d80df61bbf27f599c48f8b13dba6585cf6
|
#!/usr/bin/python
usage = """a script that tests the basic functionality of our modules"""
#=================================================
import os
import sys
sys.path.append("/home/reed/LIGO/BAYESburst/bayesburst/")
import numpy as np
import healpy as hp
import utils
import detector_cache
import time
import pickle
from optparse import OptionParser
#=================================================
# Command-line interface: each flag enables one self-test of the bayesburst
# modules; some later stages imply the earlier stages they depend on.
parser = OptionParser(usage=usage)

parser.add_option("", "--network", default="HL", type="string", help="which network to run")

parser.add_option("", "--hPrior", default=False, action="store_true")
parser.add_option("", "--malmquist-hPrior", default=False, action="store_true")
parser.add_option("", "--hPrior_pareto", default=False, action="store_true")
parser.add_option("", "--angPrior", default=False, action="store_true")
parser.add_option("", "--ap_angPrior", default=False, action="store_true")
parser.add_option("", "--posterior", default=False, action="store_true")
parser.add_option("", "--dpf", default=False, action="store_true")
parser.add_option("", "--model-selection", default=False, action="store_true")

parser.add_option("", "--num-proc", default=2, type="int")
parser.add_option("", "--max-proc", default=2, type="int")
parser.add_option("", "--max-array-size", default=100, type="int")

parser.add_option("", "--pkl", default=False, action="store_true")

parser.add_option("", "--check", default=False, action="store_true")

parser.add_option("", "--skip-mp", default=False, action="store_true")
parser.add_option("", "--skip-plots", default=False, action="store_true")
parser.add_option("", "--skip-diagnostic", default=False, action="store_true")
parser.add_option("", "--skip-diagnostic-plots", default=False, action="store_true")

parser.add_option("", "--zero-data", default=False, action="store_true")
parser.add_option("", "--zero-noise", default=False, action="store_true")

parser.add_option("-o", "--output-dir", default="./", type="string")
parser.add_option("-t", "--tag", default="", type="string")

opts, args = parser.parse_args()

# Prefix the tag with an underscore so it slots into output filenames cleanly.
if opts.tag:
    opts.tag = "_"+opts.tag

# Dependency chain: model selection / dpf need posteriors; posteriors need
# both the strain prior and the angular prior.
if opts.model_selection:
    opts.posterior = True

if opts.dpf:
    opts.posterior = True

if opts.posterior:
    opts.hPrior = opts.angPrior = True

# Boolean addition acts as a logical OR: skipping diagnostics also skips
# their plots.
opts.skip_diagnostic_plots = opts.skip_diagnostic + opts.skip_diagnostic_plots

if not os.path.exists(opts.output_dir):
    os.makedirs(opts.output_dir)

num_proc = opts.num_proc
max_proc = opts.max_proc
max_array_size = opts.max_array_size

eps = 1e-03 ### precision for "floating point errors"? Not sure where the errors are coming into AB_A and AB_invA relative to A, invA
### important for comparing output from different methods
eps_bayes = 1e-03 ### different parameter for log_bayes...
#=================================================
# set up
#=================================================
### set up stuff for hPrior
a = 4
xmin = 1e-24
xmax = 1e-20
npts = 1001

# Variance bounds derived from the strain amplitude bounds above.
vmin = 10*xmin**2
vmax = 0.1*xmax**2

n_gaus_per_decade = 2 ### approximate scaling found empirically to make my decomposition work well
n_gaus = int(round((np.log10(vmax**0.5)-np.log10(vmin**0.5))*n_gaus_per_decade, 0))
print "n_gaus :", n_gaus
variances = np.logspace(np.log10(vmin), np.log10(vmax), n_gaus)

n_pol = 2
#n_pol = 1

n_freqs = 101
freqs = np.linspace(100, 300, n_freqs)
df = freqs[1]-freqs[0]
seglen = df**-1

### set up stuff for angprior
nside_exp = 4
nside = 2**nside_exp
n_pix = hp.nside2npix(nside)
prior_type="uniform"

### set up stuff for ap_angprior
# Build the requested detector network (H=Hanford, L=Livingston, V=Virgo).
if opts.network == "HL":
    network = utils.Network([detector_cache.LHO, detector_cache.LLO], freqs=freqs, Np=n_pol)
elif opts.network == "HV":
    network = utils.Network([detector_cache.LHO, detector_cache.Virgo], freqs=freqs, Np=n_pol)
elif opts.network == "LV":
    network = utils.Network([detector_cache.Virgo, detector_cache.LLO], freqs=freqs, Np=n_pol)
elif opts.network == "HLV":
    network = utils.Network([detector_cache.LHO, detector_cache.LLO, detector_cache.Virgo], freqs=freqs, Np=n_pol)
else:
    raise ValueError, "--network=%s not understood"%opts.network

n_ifo = len(network.detectors)

### set up stuff for posterior
freq_truth = np.ones_like(freqs, bool)

### set up stuff for model selection
n_bins = 23
min_n_bins = 15
max_n_bins = 25
dn_bins = 1
log_bayes_thr = 0
generous_log_bayes_thr = -1

### plotting options
log_dynamic_range = 100

### define injection data
import injections

# Sine-gaussian injection parameters.
to=0.0
phio=0.0
fo=200
tau=0.010
#tau=0.100
q=2**0.5*np.pi*fo*tau ### the sine-gaussian's q, for reference

#hrss=2e-22 #network SNR ~50 (screaming)
#hrss=1e-22 #network SNR ~25 (cacophonous)
hrss=6e-23 #network SNR ~15 (loud)
#hrss=5e-23 #network SNR ~ 12.5 (audible)
#hrss=4e-23 #network SNR ~10 (quiet)
#hrss=2e-23 #network SNR ~5 (silent)

print "injecting data"
if opts.zero_data:
    # No signal: zeroed frequency-domain data across all detectors.
    data_inj = np.zeros((n_freqs, n_ifo), complex)
    snr_net_inj = 0.0
    injang=None
else:
    print "generating injection"
    theta_inj = np.pi/4
    phi_inj = 3*np.pi/2
    h = injections.sinegaussian_f(freqs, to=to, phio=phio, fo=fo, tau=tau, hrss=hrss, alpha=np.pi/2)
    data_inj = injections.inject(network, h, theta_inj, phi_inj, psi=0.0)
    snrs_inj = network.snrs(data_inj) ### compute individual SNRs for detectors
    snr_net_inj = np.sum(snrs_inj**2)**0.5 ### network SNR
    injang=(theta_inj, phi_inj)

if opts.zero_noise:
    noise = np.zeros((n_freqs, n_ifo), complex)
else:
    print "drawing noise"
    noise = network.draw_noise()

data = data_inj + noise
#=================================================
### filenames
# All output figure/pickle/fits paths share a common tag encoding the
# run configuration: n_freqs-n_gaus-nside_exp plus the user tag.
tag = "_%d-%d-%d%s"%(n_freqs, n_gaus, nside_exp, opts.tag)

hfigname="%s/hprior%d%s.png"%(opts.output_dir, n_gaus_per_decade, tag)
malmquist_hfigname="%s/malmquist_hprior%d%s.png"%(opts.output_dir, n_gaus_per_decade, tag)

posterior_figname = "%s/posterior%s.png"%(opts.output_dir, tag)
logposterior_figname="%s/log-posterior%s.png"%(opts.output_dir, tag)
fb_posterior_figname="%s/posterior-fixed_bandwidth%s.png"%(opts.output_dir, tag)
fb_logposterior_figname="%s/log-posterior-fixed_bandwidth%s.png"%(opts.output_dir, tag)
vb_posterior_figname="%s/posterior-variable_bandwidth%s.png"%(opts.output_dir, tag)
vb_logposterior_figname="%s/log-posterior-variable_bandwidth%s.png"%(opts.output_dir, tag)
stacked_vb_posterior_figname="%s/posterior-stacked_variable_bandwidth%s.png"%(opts.output_dir, tag)
stacked_vb_logposterior_figname="%s/log-posterior-stacked_variable_bandwidth%s.png"%(opts.output_dir, tag)
lbc_posterior_figname="%s/posterior-log_bayes_cut%s.png"%(opts.output_dir, tag)
lbc_logposterior_figname="%s/log-posterior-log_bayes_cut%s.png"%(opts.output_dir, tag)
ma_posterior_figname="%s/posterior-model_average%s.png"%(opts.output_dir, tag)
ma_logposterior_figname="%s/log-posterior-model_average%s.png"%(opts.output_dir, tag)
wf_posterior_figname="%s/posterior-waterfill%s.png"%(opts.output_dir, tag)
wf_logposterior_figname="%s/log-posterior-waterfill%s.png"%(opts.output_dir, tag)

posterior_pklname = "%s/posterior%s.pkl"%(opts.output_dir, tag)

posterior_filename = "%s/posterior%s.fits"%(opts.output_dir, tag)
fb_posterior_filename = "%s/posterior-fixed_bandwidth%s.fits"%(opts.output_dir, tag)
vb_posterior_filename = "%s/posterior-variable_bandwidth%s.fits"%(opts.output_dir, tag)
stacked_vb_posterior_filename = "%s/posterior-stacked_variable_bandwidth%s.fits"%(opts.output_dir, tag)
lbc_posterior_filename = "%s/posterior-log_bayes_cut%s.fits"%(opts.output_dir, tag)
ma_posterior_filename = "%s/posterior-model_average%s.fits"%(opts.output_dir, tag)
wf_posterior_filename = "%s/posterior-waterfill%s.fits"%(opts.output_dir, tag)

angfigname = "%s/angprior%s.png"%(opts.output_dir, tag)
ap_angfigname = "%s/ap_angprior%s.png"%(opts.output_dir, tag)

# Diagnostic figure name patterns: two %s slots filled in later.
diag_figname=opts.output_dir+"/%s-%s"+tag+".png"
logdiag_figname=opts.output_dir+"/log-%s-%s"+tag+".png"
#=================================================
# PRIORS
#=================================================
import priors

if opts.hPrior:
    # Time each priors call and, with --check, verify the "byhand" inverse
    # covariance path agrees with the default one to within eps.
    print "pareto_amplitudes"
    to=time.time()
    pareto_amps = priors.pareto_amplitudes(a, variances)
    print "\t", time.time()-to

    print "pareto"
    to=time.time()
    pareto_means, pareto_covariance, pareto_amps = priors.pareto(a, n_freqs, n_pol, variances)
    print "\t", time.time()-to

    print "hPrior"
    to=time.time()
    hprior_obj = priors.hPrior(freqs, pareto_means, pareto_covariance, amplitudes=pareto_amps, n_gaus=n_gaus, n_pol=n_pol)
    print "\t", time.time()-to
    if opts.check:
        invcovariance = hprior_obj.invcovariance
        detinvcovariance = hprior_obj.detinvcovariance

    print "hPrior.set_covariance(byhand)"
    to=time.time()
    hprior_obj.set_covariance(pareto_covariance, n_freqs=n_freqs, n_pol=n_pol, n_gaus=n_gaus, byhand=True)
    print "\t", time.time()-to
    if opts.check:
        invcovariance_byhand = hprior_obj.invcovariance
        detinvcovariance_byhand = hprior_obj.detinvcovariance

        # Element-wise relative comparison of the two inversion paths.
        if np.any(np.abs(invcovariance-invcovariance_byhand) > eps*np.abs(invcovariance+invcovariance_byhand)):
            raise StandardError, "invcovariance != invcovariance_byhand"
        elif np.any(invcovariance!=invcovariance_byhand):
            print "\tinvcovariance-invcovariance_byhand < %s*(invcovariance+invcovariance_byhand)"%str(eps)
        else:
            print "\tinvcovariance==invcovariance_byhand"

        if np.any(np.abs(detinvcovariance-detinvcovariance_byhand) > eps*np.abs(detinvcovariance+detinvcovariance_byhand)):
            raise StandardError, "detinvcovariance != detinvcovariance_byhand"
        elif np.any(detinvcovariance!=detinvcovariance_byhand):
            print "\tdetinvcovariance-detinvcovariance_byhand <= %s*(detinvcovariance+detinvcovariance_byhand)"%str(eps)
        else:
            print "\tdetinvcovariance==detinvcovariance_byhand"

if opts.hPrior_pareto:
    print "hPrior_pareto"
    to=time.time()
    hprior_obj = priors.hPrior_pareto(a, variances, freqs=freqs, n_freqs=n_freqs, n_gaus=n_gaus, n_pol=n_pol, byhand=True)
    print "\t", time.time()-to

    print "hPrior_pareto.get_amplitudes"
    to=time.time()
    amplitudes = hprior_obj.get_amplitudes(freq_truth=freq_truth, n_pol_eff=n_pol)
    print "\t", time.time()-to

# NOTE(review): this plot assumes --hPrior or --hPrior_pareto was given
# (otherwise hprior_obj is undefined) — confirm nesting against the
# original file's indentation.
if not opts.skip_plots:
    print "hPrior.plot"
    to=time.time()
    hprior_obj.plot(hfigname, grid=True, xmin=xmin, xmax=xmax, npts=npts)#, ymin=1e0)
    print "\t", time.time()-to

if opts.malmquist_hPrior:
    print "malmquist_pareto"
    to=time.time()
    malmquist_means, malmquist_covariance, malmquist_amps = priors.malmquist_pareto(a, n_freqs, n_pol, variances[1:], variances[0])
    print "\t", time.time()-to

    print "hPrior(malmquist)"
    to=time.time()
    hprior_obj = priors.hPrior(freqs, malmquist_means, malmquist_covariance, amplitudes=malmquist_amps, n_gaus=n_gaus, n_pol=n_pol)
    print "\t", time.time()-to

    if not opts.skip_plots:
        print "hPrior.plot(malmquist)"
        to=time.time()
        hprior_obj.plot(malmquist_hfigname, grid=True, xmin=xmin, xmax=xmax, npts=npts)#, ymin=1e0)
        print "\t", time.time()-to
#=================================================
if opts.angPrior:
    # Exercise construction, evaluation and plotting of the uniform
    # angular prior over the sky grid.
    print "angPrior"
    to=time.time()
    angprior_obj = priors.angPrior(nside_exp, prior_type=prior_type)
    print "\t", time.time()-to

    print "angPrior.angprior"
    to=time.time()
    angprior = angprior_obj.angprior()
    print "\t", time.time()-to

    print "angPrior.__call__"
    to=time.time()
    p = angprior_obj(np.pi/2, np.pi)
    print "\t", time.time()-to

    if not opts.skip_plots:
        print "angPrior.plot"
        to=time.time()
        angprior_obj.plot(angfigname, inj=injang)
        print "\t", time.time()-to

#=================================================
if opts.ap_angPrior:
    # Same, but with an antenna-pattern-weighted angular prior built from
    # the detector network.
    print "ap_angPrior"
    to=time.time()
    ap_angprior_obj = priors.angPrior(nside_exp, prior_type="antenna_pattern", frequency=150, exp=3.0, network=network)
    print "\t", time.time()-to

    if not opts.skip_plots:
        print "angPrior.plot"
        to=time.time()
        ap_angprior_obj.plot(ap_angfigname, inj=injang)
        print "\t", time.time()-to
#=================================================
# POSTERIORS
#=================================================
import posteriors
if opts.posterior:
print "posterior"
to=time.time()
posterior_obj = posteriors.Posterior()
print "\t", time.time()-to
#=========================================
# setting basic data stored within the object from which we compute stats
#=========================================
print "set_network"
to=time.time()
posterior_obj.set_network(network)
print "\t", time.time()-to
print "set_hPrior"
to=time.time()
posterior_obj.set_hPrior(hprior_obj)
print "\t", time.time()-to
print "set_angPrior"
to=time.time()
posterior_obj.set_angPrior(angprior_obj)
print "\t", time.time()-to
print "set_seglen"
to=time.time()
posterior_obj.set_seglen(seglen)
print "\t", time.time()-to
print "set_data"
to=time.time()
posterior_obj.set_data(data)
print "\t", time.time()-to
print "posterior.__init__()"
to=time.time()
posterior_obj = posteriors.Posterior(network=network, hPrior=hprior_obj, angPrior=angprior_obj, seglen=seglen, data=data)
print "\t", time.time()-to
#=========================================
# setting computed data stored within the object from which we compute stats
#=========================================
print "posterior.set_theta_phi()"
to=time.time()
posterior_obj.set_theta_phi()
print "\t", time.time()-to
print "posterior.set_A"
to=time.time()
posterior_obj.set_A()
print "\t", time.time()-to
if opts.check:
A = posterior_obj.A
invA = posterior_obj.invA
print "posterior.set_A(byhand)"
to=time.time()
posterior_obj.set_A(byhand=True)
print "\t", time.time()-to
if opts.check:
A_byhand = posterior_obj.A
invA_byhand = posterior_obj.invA
if np.any(np.abs(A-A_byhand) > eps*np.abs(A+A_byhand)):
raise StandardError, "A_byhand != A"
elif np.any(A!=A_byhand):
print "\tA_byhand-A <= %s*(A+A_byhand)"%str(eps)
else:
print "\tA_byhand==A"
if np.any(np.abs(invA-invA_byhand) > eps*np.abs(invA+invA_byhand)):
raise StandardError, "invA_byhand != invA"
elif np.any(invA!=invA_byhand):
print "\tinvA_byhand-invA <= %s*(invA+invA_byhand)"%str(eps)
else:
print "\tinvA_byhand==invA"
if not opts.skip_mp:
print "posterior.set_A_mp"
to=time.time()
posterior_obj.set_A_mp(num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size)
print "\t", time.time()-to
if opts.check:
A_mp = posterior_obj.A
invA_mp = posterior_obj.invA
print "posterior.set_A_mp(byhand)"
to=time.time()
posterior_obj.set_A_mp(num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size, byhand=True)
print "\t", time.time()-to
if opts.check:
A_mp_byhand = posterior_obj.A
invA_mp_byhand = posterior_obj.invA
if opts.check:
if np.any(A!=A_mp):
raise StandardError, "A!=A_mp"
else:
print "\tA==A_mp"
if np.any(invA!=invA_mp):
raise StandardError, "invA!=invA_mp"
else:
print "\tinvA==invA_mp"
if np.any(A_byhand!=A_mp_byhand):
raise StandardError, "A_byhand!=A_mp_byhand"
else:
print "\tA_byhand==A_mp_byhand"
if np.any(invA_byhand!=invA_mp_byhand):
raise StandardError, "invA_byhand!=invA_mp_byhand"
else:
print "\tinvA_byhand==invA_mp_byhand"
print "posterior.set_B"
to=time.time()
posterior_obj.set_B()
if opts.check:
B = posterior_obj.B
print "\t", time.time()-to
if not opts.skip_mp:
print "posterior.set_B_mp"
to=time.time()
posterior_obj.set_B_mp(num_proc=num_proc, max_proc=max_proc)
if opts.check:
B_mp = posterior_obj.B
print "\t", time.time()-to
if opts.check:
if np.any(B!=B_mp):
raise StandardError, "B!=B_mp"
else:
print "\tB==B_mp"
print "posterior.set_AB"
to=time.time()
posterior_obj.set_AB()
print "\t", time.time()-to
if opts.check:
AB_A = posterior_obj.A
AB_invA = posterior_obj.invA
AB_B = posterior_obj.B
print "posterior.set_AB(byhand)"
to=time.time()
posterior_obj.set_AB(byhand=True)
print "\t", time.time()-to
if opts.check:
AB_A_byhand = posterior_obj.A
AB_invA_byhand = posterior_obj.invA
AB_B_byhand = posterior_obj.B
if np.any(np.abs(AB_A-AB_A_byhand) > eps*np.abs(AB_A+AB_A_byhand)):
raise StandardError, "AB_A!=AB_A_byhand"
elif np.any(AB_A!=AB_A_byhand):
print "\tAB_A-AB_A_byhand <= %s*(AB_A+AB_A_byhand)"%str(eps)
else:
print "\tAB_A==AB_A_byhand"
if np.any(np.abs(AB_invA-AB_invA_byhand) > eps*np.abs(AB_invA+AB_invA_byhand)):
raise StandardError, "AB_invA!=AB_invA_byhand"
elif np.any(AB_invA!=AB_invA_byhand):
print "\tAB_invA-AB_invA_byhand <= %s*(AB_invA+AB_invA_byhand)"%str(eps)
else:
print "\tAB_invA==AB_invA_byhand"
if np.any(np.abs(AB_B-AB_B_byhand) > eps*np.abs(AB_B+AB_B_byhand)):
raise StandardError, "AB_B!=AB_B_byhand"
elif np.any(AB_B!=AB_B_byhand):
print "\tAB_B-AB_B_byhand <= %s*(AB_B+AB_B_byhand)"%str(eps)
else:
print "\tAB_B==AB_B_byhand"
if not opts.skip_mp:
print "posterior.set_AB_mp"
to=time.time()
posterior_obj.set_AB_mp(num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size)
print "\t", time.time()-to
if opts.check:
AB_A_mp = posterior_obj.A
AB_invA_mp = posterior_obj.invA
AB_B_mp = posterior_obj.B
print "posterior.set_AB_mp(byhand)"
to=time.time()
posterior_obj.set_AB_mp(num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size, byhand=True)
print "\t", time.time()-to
if opts.check:
AB_A_mp_byhand = posterior_obj.A
AB_invA_mp_byhand = posterior_obj.invA
AB_B_mp_byhand = posterior_obj.B
if opts.check:
if np.any(AB_A!=AB_A_mp):
raise StandardError, "AB_A!=AB_A_mp"
else:
print "\tAB_A==AB_A_mp"
if np.any(AB_invA!=AB_invA_mp):
raise StandardError, "AB_invA!=AB_invA_mp"
else:
print "\tAB_invA==AB_invA_mp"
if np.any(AB_B!=AB_B_mp):
raise StandardError, "AB_B!=AB_B_mp"
else:
print "\tAB_B==AB_B_mp"
if np.any(np.abs(AB_A_mp-AB_A_mp_byhand) > eps*np.abs(AB_A_mp+AB_A_mp_byhand)):
raise StandardError, "AB_A_mp!=AB_A_mp_byhand"
elif np.any(AB_A_mp!=AB_A_mp_byhand):
print "\tAB_A_mp-AB_A_mp_byhand <= %s*(AB_A_mp+AB_A_mp_byhand)"%str(eps)
else:
print "\tAB_A_mp==AB_A_mp_byhand"
if np.any(np.abs(AB_invA_mp-AB_invA_mp_byhand) > eps*np.abs(AB_invA_mp+AB_invA_mp_byhand)):
raise StandardError, "AB_invA_mp!=AB_invA_mp_byhand"
elif np.any(AB_invA_mp!=AB_invA_mp_byhand):
print "\tAB_invA_mp-AB_invA_mp_byhand <= %s*(AB_invA_mp+AB_invA_mp_byhand)"%str(eps)
else:
print "\tAB_invA_mp==AB_invA_mp_byhand"
if np.any(np.abs(AB_B_mp-AB_B_mp_byhand) > eps*np.abs(AB_B_mp+AB_B_mp_byhand)):
raise StandardError, "AB_B_mp!=AB_B_mp_byhand"
elif np.any(AB_B_mp!=AB_B_mp_byhand):
print "\tAB_B_mp-AB_B_mp_byhand <= %s*(AB_B_mp+AB_B_mp_byhand)"%str(eps)
else:
print "\tAB_B_mp==AB_B_mp_byhand"
if opts.check:
if np.any(np.abs(AB_A-A) > eps*np.abs(AB_A+A)):
raise StandardError, "AB_A!=A"
elif np.any(AB_A!=A):
print "\tAB_A-A <= %s*(AB_A+A)"%str(eps)
else:
print "\tAB_A==A"
if np.any(np.abs(AB_invA-invA) > eps*np.abs(AB_invA+invA)):
raise StandardError, "AB_invA!=invA"
elif np.any(AB_invA!=invA):
print "\tAB_invA-invA <= %s*(AB_A+A)"%str(eps)
else:
print "\tAB_invA==invA"
if np.any(AB_B!=B):
raise StandardError, "AB_B!=B"
else:
print "\tAB_B==B"
print "posterior.set_P"
to=time.time()
posterior_obj.set_P()
print "\t", time.time()-to
if opts.check:
P = posterior_obj.P
invP = posterior_obj.invP
detinvP = posterior_obj.detinvP
print "posterior.set_P(byhand)"
to=time.time()
posterior_obj.set_P(byhand=True)
print "\t", time.time()-to
if opts.check:
P_byhand = posterior_obj.P
invP_byhand = posterior_obj.invP
detinvP_byhand = posterior_obj.detinvP
if np.any(P != P_byhand):
raise StandardError, "P!=P_byhand"
else:
print "\tP==P_byhand"
if np.any(np.abs(invP-invP_byhand) > eps*np.abs(invP+invP_byhand)):
raise StandardError, "invP!=invP_byhand"
elif np.any(invP != invP_byhand):
print "\tinvP-invP_byhand <= %s*(invP+invP_byhand)"%str(eps)
else:
print "\tinvP==invP_byhand"
if np.any(np.abs(detinvP-detinvP_byhand) > eps*np.abs(detinvP+detinvP_byhand)):
raise StandardError, "detinvP!=detinvP_byhand"
elif np.any(detinvP != detinvP_byhand):
print "\tdetinvP-detinvP_byhand <= %s*(detinvP+detinvP_byhand)"%str(eps)
else:
print "\tdetinvP==detinvP_byhand"
if not opts.skip_mp:
print "posterior.set_P_mp"
to=time.time()
posterior_obj.set_P_mp(num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size)
print "\t", time.time()-to
if opts.check:
P_mp = posterior_obj.P
invP_mp = posterior_obj.invP
detinvP_mp = posterior_obj.detinvP
print "posterior.set_P_mp(byhand)"
to=time.time()
posterior_obj.set_P_mp(num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size, byhand=True)
print "\t", time.time()-to
if opts.check:
P_mp_byhand = posterior_obj.P
invP_mp_byhand = posterior_obj.invP
detinvP_mp_byhand = posterior_obj.detinvP
if np.any(P_mp != P_mp_byhand):
raise StandardError, "P_mp!=P_mp_byhand"
else:
print "\tP_mp==P_mp_byhand"
if np.any(np.abs(invP_mp-invP_mp_byhand) > eps*np.abs(invP_mp+invP_mp_byhand)):
raise StandardError, "invP_mp!=invP_mp_byhand"
elif np.any(invP_mp != invP_mp_byhand):
print "\tinvP_mp-invP_mp_byhand <= %s*(invP_mp+invP_mp_byhand)"%str(eps)
else:
print "\tinvP_mp==invP_mp_byhand"
if np.any(np.abs(detinvP_mp-detinvP_mp_byhand) > eps*np.abs(detinvP_mp+detinvP_mp_byhand)):
raise StandardError, "detinvP_mp!=detinvP_mp_byhand"
elif np.any(detinvP_mp != detinvP_mp_byhand):
print "\tdetinvP_mp-detinvP_mp_byhand <= %s*(detinvP_mp+detinvP_mp_byhand)"%str(eps)
else:
print "\tdetinvP_mp==detinvP_mp_byhand"
if opts.check:
if np.any(P!=P_mp):
raise StandardError, "P!=P_mp"
else:
print "\tP==P_mp"
if np.any(invP!=invP_mp):
raise StandardError, "invP!=invP_mp"
else:
print "\tinvP==invP_mp"
if np.any(detinvP!=detinvP_mp):
raise StandardError, "invP!=invP_mp"
else:
print "\tdetinvP==detinvP_mp"
print "posterior.set_dataB"
to=time.time()
posterior_obj.set_dataB()
print "\t", time.time()-to
if opts.check:
dataB = posterior_obj.dataB
dataB_conj = posterior_obj.dataB_conj
if opts.check:
if np.any(dataB!=np.conjugate(dataB_conj)):
raise StandardError, "dataB!=conj(dataB_conj)"
else:
print "\tdataB==conj(dataB_conj)"
if not opts.skip_mp:
print "posterior.set_dataB_mp"
to=time.time()
posterior_obj.set_dataB_mp(num_proc=num_proc, max_proc=max_proc)
if opts.check:
dataB_mp = posterior_obj.dataB
dataB_conj_mp = posterior_obj.dataB_conj
print "\t", time.time()-to
if opts.check:
if np.any(dataB!=dataB_mp):
raise StandardError, "dataB!=dataB_mp"
else:
print "\tdataB==dataB_mp"
if np.any(dataB_conj!=dataB_conj_mp):
raise StandardError, "dataB_conj!=dataB_conj_mp"
else:
print "\tdataB_conj==dataB_conj_mp"
#=========================================
# dpf manipulations and validation
#=========================================
if opts.dpf:
print "posterior.to_dpf"
to = time.time()
posterior_obj.to_dpf(byhand=False)
print "\t", time.time()-to
if opts.check:
Adpf = posterior_obj.A
invAdpf = posterior_obj.invA
Bdpf = posterior_obj.B
dataBdpf = posterior_obj.dataB
dataBdpf_conj = posterior_obj.dataB_conj
if np.any(dataBdpf!=np.conjugate(dataBdpf_conj)):
raise StandardError, "dataBdpf!=conj(dataBdpf_conj)"
else:
print "\tdataBdpf==conj(dataBdpf_conj)"
Pdpf = posterior_obj.P
invPdpf = posterior_obj.invP
detinvPdpf = posterior_obj.detinvP
detAdpf = np.linalg.det(Adpf)
detA = np.linalg.det(A)
if np.any(abs(detAdpf - detA) > eps*abs(detAdpf+detA)):
raise standardError, "detAdpf != detA"
elif np.any(np.linalg.det(Adpf) != np.linalg.det(A)):
print "\tdetAdpf - detA <= %s*(detAdpf + detA)"%str(eps)
else:
print "\tdetAdpf == detA"
TrA = 0.0
TrAdpf = 0.0
for x in xrange(n_pol):
TrA += A[:,:,x,x]
TrAdpf += Adpf[:,:,x,x]
if np.any(np.abs(TrA-TrAdpf) > eps*np.abs(TrA+TrAdpf)):
raise StandardError, "Tr|A| != Tr|Adpf|"
elif np.any(TrA!=TrAdpf):
print "\tTrA-TrAdpf <= %s*(TrA+TrAdpf)"%str(eps)
else:
print "\tTr|A| == Tr|Adpf|"
TrinvA = 0.0
TrinvAdpf = 0.0
for x in xrange(n_pol):
TrinvA += invA[:,:,x,x]
TrinvAdpf += invAdpf[:,:,x,x]
if np.any(np.abs(TrinvA-TrinvAdpf) > eps*np.abs(TrinvA+TrinvAdpf)):
raise StandardError, "Tr|invA| != Tr|invAdpf|"
if np.any(TrinvA!=TrinvAdpf):
print "\tTrinvA-TrinvAdpf <= %s*(TrinvA+TrinvAdpf)"%str(eps)
else:
print "\tTr|invA| == Tr|invAdpf|"
for g in xrange(n_gaus):
_detP = np.linalg.det(P[:,:,:,:,g])
_detPdpf = np.linalg.det(Pdpf[:,:,:,:,g])
if np.any(np.abs(_detP-_detPdpf) > eps*np.abs(_detP+_detPdpf)):
raise StandardError, "det|P| != det|Pdpf|"
else:
print "\tdetP-detPdpf <= %s*(detP+detPdpf)"%str(eps)
for g in xrange(n_gaus):
_detinvP = np.linalg.det(invP[:,:,:,:,g])
_detinvPdpf = np.linalg.det(invPdpf[:,:,:,:,g])
if np.any(np.abs(_detinvP-_detinvPdpf) > eps*np.abs(_detinvP+_detinvPdpf)):
raise StandardError, "det|invP| != det|invPdpf|"
else:
print "\tdetinvP - detinvPdpf <= %s*(detinvP+detinvPdpf)"%str(eps)
for g in xrange(n_gaus):
TrP = 0.0
TrPdpf = 0.0
for x in xrange(n_pol):
TrP += P[:,:,x,x,g]
TrPdpf += Pdpf[:,:,x,x,g]
if np.any(np.abs(TrP-TrPdpf) > eps*np.abs(TrP+TrPdpf)):
raise StandardError, "TrP!=TrPdpf"
else:
print "\tTrP-TrPdfp <= %s*(TrP+TrPdpf)"%str(eps)
for g in xrange(n_gaus):
TrinvP = 0.0
TrinvPdpf = 0.0
for x in xrange(n_pol):
TrinvP += invP[:,:,x,x,g]
TrinvPdpf += invPdpf[:,:,x,x,g]
if np.any(np.abs(TrinvP-TrinvPdpf) > eps*np.abs(TrinvP+TrinvPdpf)):
raise StandardError, "TrinvP!=TrinvPdpf"
else:
print "\tTrinvP-TrinvPdpf <= %s*(TrinvP+TrinvPdpf)"%str(eps)
BAB = np.zeros((n_pix, n_freqs, n_ifo, n_ifo), complex)
BABdpf = np.zeros_like(BAB, complex)
for x in xrange(n_ifo):
for y in xrange(n_ifo):
for z in xrange(n_pol):
BAB[:,:,x,y] += np.conjugate(B)[:,:,z,x] * np.sum( A[:,:,z,:] * B[:,:,:,y], axis=-1)
BABdpf[:,:,x,y] += np.conjugate(Bdpf)[:,:,z,x] * np.sum( Adpf[:,:,z,:] * Bdpf[:,:,:,y], axis=-1)
if np.any(np.abs(BAB-BABdpf) > eps*np.abs(BAB+BABdpf)):
raise StandardError, "BAB != BABdpf"
elif np.any(BAB!=BABdpf):
print "\tBAB-BABdpf <= %s*(BAB+BABdpf)"%str(eps)
else:
print "\tBAB == BABdpf"
dBABd = np.zeros((n_pix, n_freqs),complex)
dBABddpf = np.zeros_like(dBABd, complex)
for x in xrange(n_pol):
dBABd += dataB_conj[:,:,x] * np.sum(A[:,:,x,:] * dataB, axis=-1)
dBABddpf += dataBdpf_conj[:,:,x] * np.sum(Adpf[:,:,x,:] * dataBdpf, axis=-1)
if np.any(np.abs(dBABd-dBABddpf) > eps*np.abs(dBABd+dBABddpf)):
raise StandardError, "dBABd != dBABddpf"
elif np.any(dBABd != dBABddpf):
print "\tdBABd-dBABddpf <= %s*(dBABd+dBABddpf)"%str(eps)
else:
print "\tdBABd == dBABddpf"
for g in xrange(n_ifo):
BinvPB = np.zeros_like(invP[:,:,:,:,g], complex)
BinvPBdpf = np.zeros_like(BinvPB, complex)
for x in xrange(n_ifo):
for y in xrange(n_ifo):
for z in xrange(n_pol):
BinvPB[:,:,x,y] += np.conjugate(B)[:,:,z,x] * np.sum(invP[:,:,z,:,g] * B[:,:,:,y], axis=-1)
BinvPBdpf[:,:,x,y] += np.conjugate(Bdpf)[:,:,z,x] * np.sum(invPdpf[:,:,z,:,g] * Bdpf[:,:,:,y], axis=-1)
if np.any(np.abs(BinvPB-BinvPBdpf) > eps*np.abs(BinvPB+BinvPBdpf)):
raise StandardError, "BinvPB != BinvPBdpf"
elif np.any(BinvPB != BinvPBdpf):
print "\tBinvPB - BinvPBdpf <= %s*(BinvPB+BinvPBdpf)"%str(eps)
else:
print "\tBinvPB == BinvPBdpf"
dBinvPBd = np.zeros((n_pix, n_freqs), complex)
dBinvPBddpf = np.zeros_like(dBinvPBd, complex)
for x in xrange(n_pol):
dBinvPBd += dataB_conj[:,:,x] * np.sum(invP[:,:,x,:,g] * dataB, axis=-1)
dBinvPBddpf += dataBdpf_conj[:,:,x] * np.sum(invPdpf[:,:,x,:,g] * dataBdpf, axis=-1)
if np.any(np.abs(dBinvPBd-dBinvPBddpf) > eps*np.abs(dBinvPBd+dBinvPBddpf)):
raise StandardError, "dBinvPBd != dBinvPBddpf"
elif np.any(dBinvPBd != dBinvPBddpf):
print "\tdBinvPBd - dBinvPBddpf <= %s*(dBinvPBd+dBinvPBddpf)"%str(eps)
else:
print "\tdBinvPBd == dBinvPBddpf"
###
print "posterior.from_dpf"
to = time.time()
posterior_obj.from_dpf()
print "\t", time.time()-to
if opts.check:
Afrom_dpf = posterior_obj.A
invAfrom_dpf = posterior_obj.invA
Pfrom_dpf = posterior_obj.P
invPfrom_dpf = posterior_obj.invP
detinvPfrom_dpf = posterior_obj.detinvP
Bfrom_dpf = posterior_obj.B
dataBfrom_dpf = posterior_obj.dataB
if np.any(np.abs(A-Afrom_dpf) > eps*np.abs(A+Afrom_dpf)):
raise StandardError, "A != Afrom_dpf"
elif np.any(A!=Afrom_dpf):
print "\tA-Afrom_dpf <= %s*(A+Afrom_dpf)"%str(eps)
else:
print "\tA==Afrom_dpf"
if np.any(np.abs(invA-invAfrom_dpf) > eps*np.abs(invA+invAfrom_dpf)):
raise StandardError, "A != Afrom_dpf"
elif np.any(invA!=invAfrom_dpf):
print "\tinvA-invAfrom_dpf <= %s*(invA+invAfrom_dpf)"%str(eps)
else:
print "\tinvA==invAfrom_dpf"
if np.any(np.abs(P-Pfrom_dpf) > eps*np.abs(P+Pfrom_dpf)):
raise StandardError, "P != Pfrom_dpf"
elif np.any(P!=Pfrom_dpf):
print "\tP-Pfrom_dpf <= %s*(P+Pfrom_dpf)"%str(eps)
else:
print "\tP==Pfrom_dpf"
if np.any(np.abs(invP-invPfrom_dpf) > eps*np.abs(invP+invPfrom_dpf)):
raise StandardError, "invP != invPfrom_dpf"
elif np.any(invP!=invPfrom_dpf):
print "\tinvP-invPfrom_dpf <= %s*(invP+invPfrom_dpf)"%str(eps)
else:
print "\tinvP==invPfrom_dpf"
if np.any(np.abs(detinvP-detinvPfrom_dpf) > eps*np.abs(detinvP+detinvPfrom_dpf)):
raise StandardError, "invP != invPfrom_dpf"
elif np.any(detinvP!=detinvPfrom_dpf):
print "\tdetinvP-detinvPfrom_dpf <= %s*(detinvP+detinvPfrom_dpf)"%str(eps)
else:
print "\tdetinvP==detinvPfrom_dpf"
if np.any(np.abs(B-Bfrom_dpf) > eps*np.abs(B+Bfrom_dpf)):
raise StandardError, "B != Bfrom_dpf"
elif np.any(B!=Bfrom_dpf):
print "\tB-Bfrom_dpf <= %s*(B+Bfrom_dpf)"
else:
print "\tB == Bfrom_dpf"
if np.any(np.abs(dataB-dataBfrom_dpf) > eps*np.abs(dataB+dataBfrom_dpf)):
raise StandardError, "dataB != dataBfrom_dpf"
elif np.any(dataB != dataBfrom_dpf):
print "\tdataB-dataBfrom_dpf <= %s*(dataB+dataBfrom_dpf)"
else:
print "\tdataB == dataBfrom_dpf"
###
print "posterior_obj.to_dpf(byhand)"
to = time.time()
posterior_obj.to_dpf(byhand=True)
print "\t", time.time()-to
if opts.check:
Adpf_bh = posterior_obj.A
invAdpf_bh = posterior_obj.invA
Bdpf_bh = posterior_obj.B
dataBdpf_bh = posterior_obj.dataB
dataBdpf_bh_conj = posterior_obj.dataB_conj
if np.any(dataBdpf_bh!=np.conjugate(dataBdpf_bh_conj)):
raise StandardError, "dataBdpf_bh!=conj(dataBdpf_bh_conj)"
else:
print "\tdataBdpf_bh==conj(dataBdpf_bh_conj)"
Pdpf_bh = posterior_obj.P
invPdpf_bh = posterior_obj.invP
detinvPdpf_bh = posterior_obj.detinvP
detAdpf_bh = np.linalg.det(Adpf)
detA_bh = np.linalg.det(A)
if np.any(abs(detAdpf - detAdpf_bh) > eps*abs(detAdpf+detAdpf_bh)):
raise standardError, "detAdpf != detAdpf_bh"
elif np.any(np.linalg.det(Adpf) != np.linalg.det(Adpf_bh)):
print "\tdetAdpf - detAdpf_bh <= %s*(detAdpf + detAdpf_bh)"%str(eps)
else:
print "\tdetAdpf == detA"
TrAdpf_bh = 0.0
TrAdpf = 0.0
for x in xrange(n_pol):
TrAdpf_bh += Adpf_bh[:,:,x,x]
TrAdpf += Adpf[:,:,x,x]
if np.any(np.abs(TrAdpf_bh-TrAdpf) > eps*np.abs(TrAdpf_bh+TrAdpf)):
raise StandardError, "Tr|Adpf_bh| != Tr|Adpf|"
elif np.any(TrAdpf_bh!=TrAdpf):
print "\tTrAdpf_bh-TrAdpf <= %s*(TrAdpf_bh+TrAdpf)"%str(eps)
else:
print "\tTr|Adpf_bh| == Tr|Adpf|"
TrinvAdpf_bh = 0.0
TrinvAdpf = 0.0
for x in xrange(n_pol):
TrinvAdpf_bh += invAdpf_bh[:,:,x,x]
TrinvAdpf += invAdpf[:,:,x,x]
if np.any(np.abs(TrinvAdpf_bh-TrinvAdpf) > eps*np.abs(TrinvAdpf_bh+TrinvAdpf)):
raise StandardError, "Tr|invAdpf_bh| != Tr|invAdpf|"
if np.any(TrinvAdpf_bh!=TrinvAdpf):
print "\tTrinvAdpf_bh-TrinvAdpf <= %s*(TrinvAdpf_bh+TrinvAdpf)"%str(eps)
else:
print "\tTr|invAdpf_bh| == Tr|invAdpf|"
for g in xrange(n_gaus):
_detPdpf_bh = np.linalg.det(Pdpf_bh[:,:,:,:,g])
_detPdpf = np.linalg.det(Pdpf[:,:,:,:,g])
if np.any(np.abs(_detPdpf_bh-_detPdpf) > eps*np.abs(_detPdpf_bh+_detPdpf)):
raise StandardError, "det|Pdpf_bh| != det|Pdpf|"
else:
print "\tdetPdpf_bh-detPdpf <= %s*(detPdpf_bh+detPdpf)"%str(eps)
for g in xrange(n_gaus):
_detinvPdpf_bh = np.linalg.det(invPdpf_bh[:,:,:,:,g])
_detinvPdpf = np.linalg.det(invPdpf[:,:,:,:,g])
if np.any(np.abs(_detinvPdpf_bh-_detinvPdpf) > eps*np.abs(_detinvPdpf_bh+_detinvPdpf)):
raise StandardError, "det|invPdpf_bh| != det|invPdpf|"
else:
print "\tdetinvPdpf_bh - detinvPdpf <= %s*(detinvPdpf_bh+detinvPdpf)"%str(eps)
for g in xrange(n_gaus):
TrPdpf_bh = 0.0
TrPdpf = 0.0
for x in xrange(n_pol):
TrPdpf_bh += Pdpf_bh[:,:,x,x,g]
TrPdpf += Pdpf[:,:,x,x,g]
if np.any(np.abs(TrPdpf_bh-TrPdpf) > eps*np.abs(TrPdpf_bh+TrPdpf)):
raise StandardError, "TrPdpf_bh!=TrPdpf"
else:
print "\tTrPdpf_bh-TrPdfp <= %s*(TrPdpf_bh+TrPdpf)"%str(eps)
for g in xrange(n_gaus):
TrinvPdpf_bh = 0.0
TrinvPdpf = 0.0
for x in xrange(n_pol):
TrinvPdpf_bh += invPdpf_bh[:,:,x,x,g]
TrinvPdpf += invPdpf[:,:,x,x,g]
if np.any(np.abs(TrinvPdpf_bh-TrinvPdpf) > eps*np.abs(TrinvPdpf_bh+TrinvPdpf)):
raise StandardError, "TrinvPdpf_bh!=TrinvPdpf"
else:
print "\tTrinvPdpf_bh-TrinvPdpf <= %s*(TrinvPdpf_bh+TrinvPdpf)"%str(eps)
BABdpf_bh = np.zeros((n_pix, n_freqs, n_ifo, n_ifo), complex)
BABdpf = np.zeros_like(BABdpf_bh, complex)
for x in xrange(n_ifo):
for y in xrange(n_ifo):
for z in xrange(n_pol):
BABdpf_bh[:,:,x,y] += np.conjugate(Bdpf_bh)[:,:,z,x] * np.sum( Adpf_bh[:,:,z,:] * Bdpf_bh[:,:,:,y], axis=-1)
BABdpf[:,:,x,y] += np.conjugate(Bdpf)[:,:,z,x] * np.sum( Adpf[:,:,z,:] * Bdpf[:,:,:,y], axis=-1)
if np.any(np.abs(BABdpf_bh-BABdpf) > eps*np.abs(BABdpf_bh+BABdpf)):
raise StandardError, "BABdpf_bh != BABdpf"
elif np.any(BABdpf_bh!=BABdpf):
print "\tBABdpf_bh-BABdpf <= %s*(BABdpf_bh+BABdpf)"%str(eps)
else:
print "\tBABdpf_bh == BABdpf"
dBABddpf_bh = np.zeros((n_pix, n_freqs),complex)
dBABddpf = np.zeros_like(dBABddpf_bh, complex)
for x in xrange(n_pol):
dBABddpf_bh += dataBdpf_bh_conj[:,:,x] * np.sum(Adpf_bh[:,:,x,:] * dataBdpf_bh, axis=-1)
dBABddpf += dataBdpf_conj[:,:,x] * np.sum(Adpf[:,:,x,:] * dataBdpf, axis=-1)
if np.any(np.abs(dBABddpf_bh-dBABddpf) > eps*np.abs(dBABddpf_bh+dBABddpf)):
raise StandardError, "dBABddpf_bh != dBABddpf"
elif np.any(dBABddpf_bh != dBABddpf):
print "\tdBABddpf_bh-dBABddpf <= %s*(dBABddpf_bh+dBABddpf)"%str(eps)
else:
print "\tdBABddpf_bh == dBABddpf"
for g in xrange(n_ifo):
BinvPBdpf_bh = np.zeros_like(invPdpf_bh[:,:,:,:,g], complex)
BinvPBdpf = np.zeros_like(BinvPBdpf_bh, complex)
for x in xrange(n_ifo):
for y in xrange(n_ifo):
for z in xrange(n_pol):
BinvPBdpf_bh[:,:,x,y] += np.conjugate(Bdpf_bh)[:,:,z,x] * np.sum(invPdpf_bh[:,:,z,:,g] * Bdpf_bh[:,:,:,y], axis=-1)
BinvPBdpf[:,:,x,y] += np.conjugate(Bdpf)[:,:,z,x] * np.sum(invPdpf[:,:,z,:,g] * Bdpf[:,:,:,y], axis=-1)
if np.any(np.abs(BinvPBdpf_bh-BinvPBdpf) > eps*np.abs(BinvPBdpf_bh+BinvPBdpf)):
raise StandardError, "BinvPBdpf_bh != BinvPBdpf"
elif np.any(BinvPBdpf_bh != BinvPBdpf):
print "\tBinvPBdpf_bh - BinvPBdpf <= %s*(BinvPBdpf_bh+BinvPBdpf)"%str(eps)
else:
print "\tBinvPBdpf_bh == BinvPBdpf"
dBinvPBddpf_bh = np.zeros((n_pix, n_freqs), complex)
dBinvPBddpf = np.zeros_like(dBinvPBddpf_bh, complex)
for x in xrange(n_pol):
dBinvPBddpf_bh += dataBdpf_bh_conj[:,:,x] * np.sum(invPdpf_bh[:,:,x,:,g] * dataBdpf_bh, axis=-1)
dBinvPBddpf += dataBdpf_conj[:,:,x] * np.sum(invPdpf[:,:,x,:,g] * dataBdpf, axis=-1)
if np.any(np.abs(dBinvPBddpf_bh-dBinvPBddpf) > eps*np.abs(dBinvPBddpf_bh+dBinvPBddpf)):
raise StandardError, "dBinvPBddpf_bh != dBinvPBddpf"
elif np.any(dBinvPBddpf_bh != dBinvPBddpf):
print "\tdBinvPBddpf_bh - dBinvPBddpf <= %s*(dBinvPBddpf_bh+dBinvPBddpf)"%str(eps)
else:
print "\tdBinvPBddpf_bh == dBinvPBddpf"
#================================================
# pickling to save
#================================================
if opts.pkl:
    print "pickling posterior into ", posterior_pklname
    to=time.time()
    # NOTE(review): opened in text mode "w"; works for protocol-0 pickles on
    # py2/Unix, but "wb" would be safer and portable -- confirm before changing
    file_obj = open(posterior_pklname, "w")
    pickle.dump(posterior_obj, file_obj)
    file_obj.close()
    print "\t", time.time()-to
#================================================
# analysis routines that do no store data within the object
#================================================
# print "posterior.n_pol_eff()"
# to=time.time()
# posterior_obj.n_pol_eff(posterior_obj.theta, posterior_obj.phi)
# print "\t", time.time()-to
print "posterior.mle_strain"
to=time.time()
# mle_strain = posterior_obj.mle_strain(posterior_obj.theta, posterior_obj.phi, psi=0.0, n_pol_eff=None, invA_dataB=(posterior_obj.invA, posterior_obj.dataB))
mle_strain = posterior_obj.mle_strain(posterior_obj.theta, posterior_obj.phi, psi=0.0, invA_dataB=(posterior_obj.invA, posterior_obj.dataB))
if not opts.check:
del mle_strain
print "\t", time.time()-to
if not opts.skip_mp:
print "posterior.mle_strain_mp"
to=time.time()
# mle_strain_mp = posterior_obj.mle_strain_mp(posterior_obj.theta, posterior_obj.phi, 0.0, num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size, n_pol_eff=None, invA_dataB=(posterior_obj.invA, posterior_obj.dataB))
mle_strain_mp = posterior_obj.mle_strain_mp(posterior_obj.theta, posterior_obj.phi, 0.0, num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size, invA_dataB=(posterior_obj.invA, posterior_obj.dataB))
if not opts.check:
del mle_strain_mp
print "\t", time.time()-to
if opts.check:
if np.any(mle_strain!=mle_strain_mp):
raise StandardError, "mle_strain!=mle_strain_mp"
else:
print "\tmle_strain==mle_strain_mp"
print "posterior.log_posterior_elements"
to=time.time()
log_posterior_elements, n_pol_eff = posterior_obj.log_posterior_elements(posterior_obj.theta, posterior_obj.phi, psi=0.0, invP_dataB=(posterior_obj.invP, posterior_obj.detinvP, posterior_obj.dataB, posterior_obj.dataB_conj), A_invA=(posterior_obj.A, posterior_obj.invA), connection=None, diagnostic=False)
print "\t", time.time()-to
if not opts.skip_mp:
print "posterior.log_posterior_elements_mp"
to=time.time()
mp_log_posterior_elements, mp_n_pol_eff = posterior_obj.log_posterior_elements_mp(posterior_obj.theta, posterior_obj.phi, psi=0.0, invP_dataB=(posterior_obj.invP, posterior_obj.detinvP, posterior_obj.dataB, posterior_obj.dataB_conj), A_invA=(posterior_obj.A, posterior_obj.invA), diagnostic=False, num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size)
if not opts.check:
del mp_log_posterior_elements, mp_n_pol_eff
print "\t", time.time()-to
if opts.check:
if np.any(log_posterior_elements!=mp_log_posterior_elements) :
raise StandardError, "conflict between log_posterior_elements and mp_log_posterior_elements"
else:
print "\tlog_posterior_elements==mp_log_posterior_elements"
if np.any(n_pol_eff!=mp_n_pol_eff):
raise StandardError, "conflict between n_pol_eff and mp_n_pol_eff"
else:
print "\tn_pol_eff==mp_n_pol_eff"
if not opts.skip_diagnostic:
print "posterior.log_posterior_elements(diagnostic=True)"
to=time.time()
log_posterior_elements_diag, n_pol_eff_diag, (mle, cts, det) = posterior_obj.log_posterior_elements(posterior_obj.theta, posterior_obj.phi, psi=0.0, invP_dataB=(posterior_obj.invP, posterior_obj.detinvP, posterior_obj.dataB, posterior_obj.dataB_conj), A_invA=(posterior_obj.A, posterior_obj.invA), connection=None, diagnostic=True)
print "\t", time.time()-to
if opts.check:
if np.any(log_posterior_elements_diag!=log_posterior_elements):
raise StandardError, "conflict between log_posterior_elements and log_posterior_elements_diag"
else:
print "\tlog_posterior_elements_diag==log_posterior_elements"
if np.any(n_pol_eff_diag!=n_pol_eff):
raise StandardError, "conflict between n_pol_eff and n_pol_eff_diag"
else:
print "\tn_pol_eff_diag==n_pol_eff"
if not opts.skip_mp:
print "posterior.log_posterior_elements_mp(diagnostic=True)"
to=time.time()
mp_log_posterior_elements, mp_n_pol_eff, (mp_mle, mp_cts, mp_det) = posterior_obj.log_posterior_elements_mp(posterior_obj.theta, posterior_obj.phi, psi=0.0, invP_dataB=(posterior_obj.invP, posterior_obj.detinvP, posterior_obj.dataB, posterior_obj.dataB_conj), A_invA=(posterior_obj.A, posterior_obj.invA), num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size, diagnostic=True)
if not opts.check:
del mp_log_posterior_elements, mp_n_pol_eff
print "\t", time.time()-to
if opts.check:
if np.any(log_posterior_elements!=mp_log_posterior_elements):
raise StandardError, "conflict between log_posterior_elements and mp_log_posterior_elements"
if np.any(n_pol_eff!=mp_n_pol_eff):
raise StandardError, "conflict between n_pol_eff and mp_n_pol_eff"
if np.any(mle!=mp_mle):
raise StandardError, "conflict between mle and mp_mle"
if np.any(cts!=mp_cts):
raise StandardError, "conflict between cts and mp_cts"
if np.any(det!=mp_det):
raise StandardError, "conflict between det and mp_det"
if opts.check:
if np.any(np.abs(log_posterior_elements-(mle+cts+det)) > eps*np.abs(log_posterior_elements+mle+cts+det)):
raise StandardError, "log_posterior_elements!=mle+cts+det"
elif np.any(log_posterior_elements!=mle+cts+det):
print "\tlog_posterior_elements - (mle+cts+det) <= %s*(log_posterior_elements + mle+cts+det)"%str(eps)
else:
print "\tlog_posterior_elements==mle+cts+det"
n_pol_eff = n_pol_eff[0] ### we only support integer n_pol_eff right now...
print "posterior.log_posterior"
to=time.time()
log_posterior_unnorm = posterior_obj.log_posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, normalize=False)
print "\t", time.time()-to
if not opts.skip_mp:
print "posterior.log_posterior_mp"
to=time.time()
log_posterior_unnorm_mp = posterior_obj.log_posterior_mp(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, normalize=False, num_proc=num_proc, max_proc=max_proc)
if not opts.check:
del log_posterior_mp
print "\t", time.time()-to
if opts.check:
if np.any(log_posterior_unnorm!=log_posterior_unnorm_mp):
raise StandardError, "log_posterior_unnorm!=log_posterior_unnorm_mp"
else:
print "\tlog_posterior_unnorm==log_posterior_unnorm_mp"
print "posterior.log_posterior(normalize=True)"
to=time.time()
log_posterior = posterior_obj.log_posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, normalize=True)
print "\t", time.time()-to
if not opts.skip_mp:
print "posterior.log_posterior_mp(normalize=True)"
to=time.time()
log_posterior_mp = posterior_obj.log_posterior_mp(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, normalize=True, num_proc=num_proc, max_proc=max_proc)
if not opts.check:
del log_posterior_mp
print "\t", time.time()-to
if opts.check:
if np.any(log_posterior!=log_posterior_mp):
raise StandardError, "log_posterior!=log_posterior_mp"
else:
print "\tlog_posterior==log_posterior_mp"
print "posterior.posterior"
to=time.time()
posterior = posterior_obj.posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, normalize=True)
print "\t", time.time()-to
if not opts.skip_mp:
print "posterior.posterior_mp"
to=time.time()
posterior_mp = posterior_obj.posterior_mp(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, normalize=True, num_proc=num_proc, max_proc=max_proc)
if not opts.check:
del posterior_mp
print "\t", time.time()-to
if opts.check:
if np.any(posterior!=posterior_mp):
raise StandardError, "posterior!=posterior_mp"
else:
print "\tposterior==posterior_mp"
if np.any(posterior!=np.exp(log_posterior)):
raise StandardError, "posterior!=np.exp(log_posterior)"
else:
print "\tposterior==np.exp(log_posterior)"
print "posterior.log_bayes"
to=time.time()
log_bayes = posterior_obj.log_bayes(log_posterior_unnorm)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(freq_truth), log_bayes)
if not opts.skip_mp:
print "posterior.log_bayes_mp"
to=time.time()
log_bayes_mp = posterior_obj.log_bayes_mp(log_posterior_unnorm, num_proc=num_proc, max_proc=max_proc)
if not opts.check:
del log_bayes_mp
print "\t", time.time()-to
if opts.check:
if np.any(np.abs(log_bayes-log_bayes_mp) > eps_bayes*np.abs(log_bayes+log_bayes_mp)):
raise StandardError, "log_bayes!=log_bayes_mp"
elif np.any(log_bayes!=log_bayes_mp):
print "\tlog_bayes-log_bayes_mp <= %s*(log_bayes+log_bayes_mp)"%str(eps_bayes)
else:
print "\tlog_bayes==log_bayes_mp"
print "posterior.bayes"
to=time.time()
bayes = posterior_obj.bayes(log_posterior_unnorm)
print "\t", time.time()-to
if not opts.skip_mp:
print "posterior.bayes_mp"
to=time.time()
bayes_mp = posterior_obj.bayes_mp(log_posterior_unnorm, num_proc=num_proc, max_proc=max_proc)
if not opts.check:
del bayes_mp
print "\t", time.time()-to
if opts.check:
if np.any(np.abs(log_bayes-log_bayes_mp) > eps_bayes*np.abs(log_bayes+log_bayes_mp)):
raise StandardError, "bayes!=bayes_mp"
elif np.any(bayes!=bayes_mp):
print "\tbayes-bayes_mp <= %s*(bayes+bayes_mp)"%str(eps_bayes)
else:
print "\tbayes==bayes_mp"
if np.any(bayes!=np.exp(log_bayes)):
raise StandardError, "bayes!=np.exp(log_bayes)"
else:
print "\tbayes==np.exp(log_bayes)"
print "posterior.__call__"
to=time.time()
posterior_call = posterior_obj()
print "\t", time.time()-to
if opts.check:
if np.any(posterior_call!=posterior):
raise StandardError, "posterior_call!=posterior"
else:
print "\tposterior_call==posterior"
print "writing posterior to file"
hp.write_map(posterior_filename, posterior)
#=========================================
# plotting
#=========================================
if not opts.skip_plots:
print "posterior.plot"
to=time.time()
posterior_obj.plot(posterior_figname, posterior=posterior, title="posterior\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(freq_truth), log_bayes, snr_net_inj), unit="prob/pix", inj=injang, est=None)
posterior_obj.plot(logposterior_figname, posterior=np.log10(posterior), title="log10( posterior )\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(freq_truth), log_bayes, snr_net_inj), unit="log10(prob/pix)", inj=injang, est=None) #, min=np.max(np.min(np.log10(posterior)), np.max(np.log10(posterior))-log_dynamic_range))
print "\t", time.time()-to
if not opts.skip_diagnostic_plots:
print "diagnostic plots"
### use log_poserterior_elements computed above
for g in xrange(n_gaus):
s = variances[g]**0.5
d = int(np.log10(s))-1
s = "%.3fe%d"%(s * 10**-d, d)
### mle
print "mle %s"%s
to=time.time()
_mle = np.sum(mle[:,g,:], axis=1) * df ### sum over freqs
posterior_obj.plot(diag_figname%("mle",g), posterior=np.exp(_mle), title="mle %s"%s, inj=injang, est=None)
posterior_obj.plot(logdiag_figname%("mle",g), posterior=_mle, title="log10( mle %s )"%s, inj=injang, est=None)
print "\t", time.time()-to
### cts
print "cts %s"%s
to=time.time()
_cts = np.sum(cts[:,g,:], axis=1) * df
posterior_obj.plot(diag_figname%("cts",g), posterior=np.exp(_cts), title="cts %s"%s, inj=injang, est=None)
posterior_obj.plot(logdiag_figname%("cts",g), posterior=_cts, title="log10( cts %s )"%s, inj=injang, est=None)
print "\t", time.time()-to
### det
print "det %s"%s
to=time.time()
_det = np.sum(det[:,g,:], axis=1) * df
posterior_obj.plot(diag_figname%("det",g), posterior=np.exp(_det), title="det %s"%s, inj=injang, est=None)
posterior_obj.plot(logdiag_figname%("det",g), posterior=_det, title="log10( det %s )"%s, inj=injang, est=None)
print "\t", time.time()-to
### mle+cts+det
print "mle+cts+det %s"%s
to=time.time()
posterior_obj.plot(diag_figname%("mle*cts*det",g), posterior=np.exp(_mle+_cts+_det), title="mle*cts*det %s"%s, inj=injang, est=None)
posterior_obj.plot(logdiag_figname%("mle*cts*det",g), posterior=_mle+_cts+_det, title="log10( mle*cts*det )", inj=injang, est=None)
print "\t", time.time()-to
#=========================================
# check sanity of results
#=========================================
print """WRITE TESTS FOR
the sanity of these results, not just the fact that they don't throw errors
compare F*mle_strain with data in each detector
write and test a histogram of the posterior [p(h|theta,phi,data)] for the reconstructed signal weighted by the skymap [p(theta,phi|data)] at that point. Do this at:
each detector (broadcast through antenna patterns into data stream)
geocenter
look at coverage plots for:
sky localization
frequency moments
h_rss
else?
GOOD VISUALIZATION TOOLS MAY BE USEFUL HERE!
"""
#=================================================
if opts.model_selection:
import model_selection
print "model_selection.log_bayes_cut"
to=time.time()
lbc_model, lbc_lb = model_selection.log_bayes_cut(log_bayes_thr, posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, joint_log_bayes=True)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(lbc_model), lbc_lb)
if not opts.skip_mp:
print "model_selection.log_bayes_cut_mp"
to=time.time()
lbc_model_mp, lbc_lb_mp = model_selection.log_bayes_cut_mp(log_bayes_thr, posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, num_proc=num_proc, max_proc=max_proc, joint_log_bayes=True)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(lbc_model_mp), lbc_lb_mp)
if not opts.check:
del lbc_model_mp, lbc_lb_mp
if opts.check:
if np.any(lbc_model!=lbc_model_mp):
raise StandardError, "lbc_model!=lbc_model_mp"
else:
print "\tlbc_model==lbc_model_mp"
if lbc_lb!=lbc_lb_mp:
raise StandardError, "lbc_model!=lbc_model_mp"
else:
print "\tlbc_model==lbc_model_mp"
print "lbc_posterior"
to=time.time()
lbc_posterior = posterior_obj.posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, lbc_model, normalize=True)
print "\t", time.time()-to
if not opts.skip_plots:
print "posterior.plot(log_bayes_cut)"
to=time.time()
posterior_obj.plot(lbc_posterior_figname, posterior=lbc_posterior, title="posterior\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(lbc_model), lbc_lb,snr_net_inj), unit="prob/pix", inj=injang, est=None)
posterior_obj.plot(lbc_logposterior_figname, posterior=np.log10(lbc_posterior), title="log10( posterior )\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(lbc_model),lbc_lb,snr_net_inj), unit="log10(prob/pix)", inj=injang, est=None)#, min=np.max(np.min(np.log10(lbc_posterior)), np.max(np.log10(lbc_posterior))-log_dynamic_range))
print "\t", time.time()-to
print "writing posterior to file"
hp.write_map(lbc_posterior_filename, lbc_posterior)
print "model_selection.fixed_bandwidth"
to=time.time()
fb_model, fb_lb = model_selection.fixed_bandwidth(posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, n_bins=n_bins)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(fb_model), fb_lb)
if not opts.skip_mp:
print "model_selection.fixed_bandwidth_mp"
to=time.time()
fb_model_mp, fb_lb_mp = model_selection.fixed_bandwidth_mp(posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, n_bins=n_bins, num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(fb_model_mp), fb_lb_mp)
if not opts.check:
del fb_model_mp, fb_lb_mp
if opts.check:
if np.any(fb_model!=fb_model_mp):
raise StandardError, "model!=model_mp"
else:
print "\tmodel==model_mp"
if fb_lb!=fb_lb_mp:
raise StandardError, "lb!=lb_mp"
else:
print "\tlb==lb_mp"
print "fb_posterior"
to=time.time()
fb_posterior = posterior_obj.posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, fb_model, normalize=True)
print "\t", time.time()-to
if not opts.skip_plots:
print "posterior.plot(fixed_bandwidth)"
to=time.time()
posterior_obj.plot(fb_posterior_figname, posterior=fb_posterior, title="posterior\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(fb_model),fb_lb, snr_net_inj), unit="prob/pix", inj=injang, est=None)
posterior_obj.plot(fb_logposterior_figname, posterior=np.log10(fb_posterior), title="log10( posterior )\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(fb_model),fb_lb, snr_net_inj), unit="log10(prob/pix)", inj=injang, est=None)#, min=np.max(np.min(np.log10(fb_posterior)), np.max(np.log10(fb_posterior))-log_dynamic_range))
print "\t", time.time()-to
print "writing posterior to file"
hp.write_map(fb_posterior_filename, fb_posterior)
print "model_selection.variable_bandwidth"
to=time.time()
vb_model, vb_lb = model_selection.variable_bandwidth(posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, min_n_bins=min_n_bins, max_n_bins=max_n_bins, dn_bins=dn_bins)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(vb_model), vb_lb)
if not opts.skip_mp:
print "model_selection.variable_bandwidth_mp"
to=time.time()
vb_model_mp, vb_lb_mp = model_selection.variable_bandwidth_mp(posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, min_n_bins=min_n_bins, max_n_bins=max_n_bins, dn_bins=dn_bins, num_proc=num_proc, max_proc=max_proc, max_array_size=max_array_size)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(vb_model_mp), vb_lb_mp)
if not opts.check:
del vb_model_mp, vb_lb_mp
if opts.check:
if np.any(vb_model!=vb_model_mp):
raise StandardError, "vb_model!=vb_model_mp"
else:
print "\tvb_model==vb_model_mp"
if vb_lb!=vb_lb_mp:
raise StandardError, "vb_lb!=vb_lb_mp"
else:
print "\tvb_lb==vb_lb_mp"
print "vb_posterior"
to=time.time()
vb_posterior = posterior_obj.posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, vb_model, normalize=True)
print "\t", time.time()-to
if not opts.skip_plots:
print "posterior.plot(variable_bandwidth)"
to=time.time()
posterior_obj.plot(vb_posterior_figname, posterior=vb_posterior, title="posterior\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(vb_model),vb_lb,snr_net_inj), unit="prob/pix", inj=injang, est=None)
posterior_obj.plot(vb_logposterior_figname, posterior=np.log10(vb_posterior), title="log10( posterior )\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(vb_model),vb_lb,snr_net_inj), unit="log10(prob/pix)", inj=injang, est=None)#, min=np.max(np.min(np.log10(vb_posterior)),np.max(np.log10(vb_posterior))-log_dynamic_range))
print "\t", time.time()-to
print "writing posterior to file"
hp.write_map(vb_posterior_filename, vb_posterior)
print "model_selection.variable_bandwidth(model_selection.log_bayes_cut)"
to=time.time()
generous_lbc_model = model_selection.log_bayes_cut(generous_log_bayes_thr, posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, joint_log_bayes=False)
stacked_vb_model, stacked_vb_lb = model_selection.variable_bandwidth(posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, generous_lbc_model, min_n_bins=min_n_bins, max_n_bins=max_n_bins, dn_bins=dn_bins)
print "\t", time.time()-to
print "\tn_bins=%d->%d, logBayes=%.3f"%(np.sum(generous_lbc_model), np.sum(stacked_vb_model), stacked_vb_lb)
print "stacked_vb_posterior"
to=time.time()
stacked_vb_posterior = posterior_obj.posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, stacked_vb_model, normalize=True)
print "\t", time.time()-to
if not opts.skip_plots:
print "posterior.plot(variable_bandwidth(log_bayes_cut))"
to=time.time()
posterior_obj.plot(stacked_vb_posterior_figname, posterior=stacked_vb_posterior, title="posterior\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(stacked_vb_model),stacked_vb_lb,snr_net_inj), unit="prob/pix", inj=injang, est=None)
posterior_obj.plot(stacked_vb_logposterior_figname, posterior=np.log10(stacked_vb_posterior), title="log10( posterior )\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(stacked_vb_model),stacked_vb_lb,snr_net_inj), unit="log10(prob/pix)", inj=injang, est=None)#, min=np.max(np.min(np.log10(vb_posterior)),np.max(np.log10(vb_posterior))-log_dynamic_range))
print "\t", time.time()-to
print "writing posterior to file"
hp.write_map(stacked_vb_posterior_filename, stacked_vb_posterior)
print "setting up fixed_bandwidth models for model_selection.model_average"
to=time.time()
binNos = np.arange(n_freqs)[freq_truth]
n_models = np.sum(freq_truth)-n_bins+1
models = np.zeros((n_models, n_freqs),bool)
for modelNo in xrange(n_models):
models[modelNo][binNos[modelNo:modelNo+n_bins]] = True
print "\t", time.time()-to
print "model_selection.model_average"
to=time.time()
ma_log_posterior = model_selection.model_average(posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, models)
ma_posterior = np.exp( ma_log_posterior )
print "\t", time.time()-to
if not opts.skip_plots:
print "posterior.plot(fixed_bandwidth model_average)"
to=time.time()
posterior_obj.plot(ma_posterior_figname, posterior=ma_posterior, title="posterior\n$\\rho_{net}$=%.3f"%(snr_net_inj), unit="prob/pix", inj=injang, est=None)
posterior_obj.plot(ma_logposterior_figname, posterior=np.log10(ma_posterior), title="log10( posterior )\n$\\rho_{net}$=%.3f"%(snr_net_inj), unit="log10(prob/pix)", inj=injang, est=None)#, min=np.max(np.min(np.log10(vb_posterior)),np.max(np.log10(vb_posterior))-log_dynamic_range))
print "\t", time.time()-to
print "writing posterior to file"
hp.write_map(ma_posterior_filename, ma_posterior)
print "model_selection.waterfill"
to=time.time()
wf_model, wf_lb = model_selection.waterfill(posterior_obj, posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, freq_truth, connection=None, max_array_size=max_array_size)
print "\t", time.time()-to
print "\tn_bins=%d, logBayes=%.3f"%(np.sum(wf_model), wf_lb)
print "wf_posterior"
to=time.time()
wf_posterior = posterior_obj.posterior(posterior_obj.theta, posterior_obj.phi, log_posterior_elements, n_pol_eff, wf_model, normalize=True)
print "\t", time.time()-to
if not opts.skip_plots:
print "posterior.plot(fixed_bandwidth model_average)"
to=time.time()
posterior_obj.plot(wf_posterior_figname, posterior=wf_posterior, title="posterior\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(wf_model),wf_lb,snr_net_inj), unit="prob/pix", inj=injang, est=None)
posterior_obj.plot(wf_logposterior_figname, posterior=np.log10(wf_posterior), title="log10( posterior )\nNo bins=%d\nlogBayes=%.3f\n$\\rho_{net}$=%.3f"%(np.sum(wf_model),wf_lb,snr_net_inj), unit="log10(prob/pix)", inj=injang, est=None)
print "\t", time.time()-to
print "writing posterior to file"
hp.write_map(wf_posterior_filename, ma_posterior)
print """WRITE TESTS FOR
remaining model_selection (to be written?)
plot diagnostic values using the models -> see how these break down
"""
|
reedessick/bayesburst
|
methods_tests.py
|
Python
|
gpl-2.0
| 66,937
|
[
"Gaussian"
] |
c8f39d32f0d0c9987180d3614ca8ed24398f2cb4f6d6706c24c5b9112bb3bc5c
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
import numpy as np
from espressomd.interactions import RigidBond
@utx.skipIfMissingFeatures("BOND_CONSTRAINT")
class RigidBondTest(ut.TestCase):

    """Integration test for the RigidBond (holonomic distance) constraint.

    Builds a linear chain of five particles connected by rigid bonds of
    length 1.2, thermalizes it with a Langevin thermostat, and then checks
    that both the bond lengths (position constraint) and the relative
    velocity projected onto each bond (velocity constraint) stay within
    tolerance.
    """

    def test(self):
        # Velocity tolerance requested from the constraint solver; the
        # acceptance threshold is slightly looser to avoid flaky failures.
        target_acc = 1E-3
        tol = 1.2 * target_acc
        s = espressomd.System(box_l=[1.0, 1.0, 1.0])
        s.seed = s.cell_system.get_state()['n_nodes'] * [1234]
        s.box_l = [10, 10, 10]
        s.cell_system.skin = 0.4
        s.time_step = 0.01
        s.thermostat.set_langevin(kT=1, gamma=1, seed=42)
        # r=1.2 is the constrained bond length; ptol/vtol are the
        # positional and velocity tolerances of the iterative solver.
        r = RigidBond(r=1.2, ptol=1E-3, vtol=target_acc)
        s.bonded_inter.add(r)
        # Place five particles on a straight line, bonding each one to its
        # predecessor so that the whole chain is constrained.
        for i in range(5):
            s.part.add(id=i, pos=(i * 1.2, 0, 0))
            if i > 0:
                s.part[i].bonds = ((r, i - 1),)
        s.integrator.run(5000)
        for i in range(1, 5):
            d = s.distance(s.part[i], s.part[i - 1])
            v_d = s.distance_vec(s.part[i], s.part[i - 1])
            # Position constraint: the bond length must remain 1.2.
            self.assertLess(abs(d - 1.2), tol)
            # Velocity projection on distance vector: the relative
            # velocity along the bond must (nearly) vanish.
            vel_proj = np.dot(s.part[i].v - s.part[i - 1].v, v_d) / d
            self.assertLess(vel_proj, tol)
# Run this test module directly with the unittest runner.
if __name__ == "__main__":
    ut.main()
|
mkuron/espresso
|
testsuite/python/rigid_bond.py
|
Python
|
gpl-3.0
| 1,942
|
[
"ESPResSo"
] |
17c7ffc5add56efa67054b78c29cd7ac7d04783c8d4147bfe42b00f8be300bf5
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module to facilitate quantum chemical computations on chemical
databases. Contains Molecule class and physical constants from psi4 suite.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
__version__ = '0.4'
__author__ = 'Lori A. Burns'
# Load Python modules
import sys
from .molecule import Molecule
from .dbproc import *
from .options import *
from .qcformat import *
from . import cfour
from . import jajo
from . import orca
from .orient import OrientMols
from .dbwrap import Database, DB4 #DatabaseWrapper #ReactionDatum, Reagent, Reaction
from .libmintspointgrp import SymmetryOperation, PointGroup
from .libmintsbasisset import BasisSet
from .libmintsmolecule import LibmintsMolecule
from .basislist import *
# Load items that are useful to access from an input file
from .psiutil import *
from .physconst import *
|
kratman/psi4public
|
psi4/driver/qcdb/__init__.py
|
Python
|
gpl-2.0
| 1,840
|
[
"CFOUR",
"ORCA",
"Psi4"
] |
0ea860fcfd840ffce3a22483d3cd0c9a2530257a7f19ab1e95023a95d760deb9
|
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from scipy import random
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.tools.functions import expln, explnPrime
from pybrain.structure.parametercontainer import ParameterContainer
class GaussianLayer(NeuronLayer, ParameterContainer):
    """ A layer implementing a gaussian interpretation of the input. The mean is
    the input, the sigmas are stored in the module parameters."""

    def __init__(self, dim, name=None):
        NeuronLayer.__init__(self, dim, name)
        # Initialize sigmas to 0; the effective standard deviation used at
        # forward time is expln(params), which keeps sigma positive.
        ParameterContainer.__init__(self, dim, stdParams=0)
        # If autoalpha is set to True, alpha_sigma = alpha_mu = alpha*sigma^2,
        # i.e. the 1/sigma^2 rescaling of the gradients below is skipped.
        self.autoalpha = False
        # When disabled, the layer is a pass-through (no sampling).
        self.enabled = True

    def setSigma(self, sigma):
        """Wrapper method to set the sigmas (the parameters of the module) to a
        certain value. """
        assert len(sigma) == self.indim
        # Zero the parameter buffer in place, then add the new values, so the
        # underlying array object (shared with the network) is preserved.
        self._params *= 0
        self._params += sigma

    def _forwardImplementation(self, inbuf, outbuf):
        if not self.enabled:
            outbuf[:] = inbuf
        else:
            # Sample from N(mean=input, std=expln(params)).
            outbuf[:] = random.normal(inbuf, expln(self.params))

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        expln_params = expln(self.params)
        # Gradient of the Gaussian log-likelihood w.r.t. the sigma
        # parameters, chained through the expln transform.
        self._derivs += ((outbuf - inbuf) ** 2 - expln_params ** 2) / expln_params * explnPrime(self.params)
        inerr[:] = (outbuf - inbuf)
        if not self.autoalpha:
            # NOTE(review): indentation of these two lines was lost in the
            # extraction; upstream pybrain scales both gradients by
            # 1/sigma^2 inside this branch — confirm against the repo.
            inerr /= expln_params ** 2
            self._derivs /= expln_params ** 2
|
comepradz/pybrain
|
pybrain/structure/modules/gaussianlayer.py
|
Python
|
bsd-3-clause
| 1,579
|
[
"Gaussian"
] |
8853682203cc1834a280af420514dbd218e0ab332f935d72cd203ecda97ddc3c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.