| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses 1 value | stringclasses 15 values | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
# -*- coding: utf-8 -*-
"""
"""
import math
import copy
from ROOT import TGraphErrors, TCanvas, TF1, TFile, kRed, kGreen, gROOT, gErrorIgnoreLevel, kError
import plotbase
import getroot
gROOT.SetBatch(True)
gErrorIgnoreLevel = kError
n_fail = [0, 0, 0, 0, 0, 0]  # failure counters: 1st fit, 2nd fit, strange sigma, correlated, uncorrelated, extrapolation warnings
def get_errerr(roothisto):
sqrt_nminus1 = math.sqrt(roothisto.GetEntries() - 1)
return roothisto.GetRMSError() / sqrt_nminus1
def fit_resolution(file, histo_name, tag, out_path, fit_formula = "gaus", rebin = 8, truncInSigma = 2.0, verb=False):
"""fit response with truncated gaussian
fits the resolution of an distribution with a truncated gaussian
and returns a tuple with sigma and the stat. error on sigma
"""
c = TCanvas(histo_name, histo_name, 600, 600)
fitFunc = TF1("fit1", fit_formula, 0, 2.0)
hist_resp = getroot.getobject(histo_name, file)
hist_resp.Rebin(rebin)
n = hist_resp.GetEntries()
message = ""
# first untruncated fit
fitFunc.SetParameters(1.0, 1.0, 0.25)
fitres = hist_resp.Fit(fitFunc, "SQ")
if fitres.IsValid():
gaus_norm = fitres.GetParams()[0]
gaus_center = fitres.GetParams()[1]
gaus_sigma = fitres.GetParams()[2]
gaus_sigmaerr = fitres.GetErrors()[2]
else:
print "First fit FAILED!", histo_name, "(n =", n, ")"
n_fail[0] += 1
message = "[No Gauß, n=%d]" % n
gaus_norm = 1.0
gaus_center = hist_resp.GetMean()
gaus_sigma = hist_resp.GetRMS()
gaus_sigmaerr = hist_resp.GetRMSError()
if verb:
print "Resolution: %1.4f ± %1.4f" % (gaus_sigma, gaus_sigmaerr),
# second truncated fit
if gaus_sigma < 0.04 or gaus_sigmaerr > 0.8*gaus_sigma:
n_fail[2] += 1
print "Strange sigma:", gaus_sigma, gaus_sigmaerr, "using RMS"
message += "strange sigma"
return (hist_resp.GetRMS(), 2*hist_resp.GetRMSError(), message) # 2 is arbitrary
fitFunc_trunc = TF1("fit1_trunc", fit_formula,
max(0.0, gaus_center - truncInSigma * gaus_sigma),
min(2.0, gaus_center + truncInSigma * gaus_sigma))
fitFunc_trunc.SetLineColor(kRed)
fitFunc_trunc.SetParameters(1.0, 1.0, 0.25) #gaus_norm, gaus_center, gaus_sigma)
fitres = hist_resp.Fit(fitFunc_trunc, "SRQ")
# Draw and save the fit histogram
hist_resp.Draw("")
histo_name = tag + histo_name.replace("/", "_") + "_resolution_fit.png"
#c.Print(out_path + histo_name)
del c
# extract the fit result and return it
if fitres.IsValid():
m_reso_fit = fitres.GetParams()[2]
m_reso_fit_err = fitres.GetErrors()[2]
else:
print "Second fit FAILED!", histo_name
n_fail[1] += 1
message += "\b and no truncated Gauß]"
m_reso_fit = hist_resp.GetRMS()
m_reso_fit_err = hist_resp.GetRMSError()
if verb:
print "trunc> %1.4f ± %1.4f" % (m_reso_fit, m_reso_fit_err)
return (m_reso_fit, m_reso_fit_err, message)
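# A minimal usage sketch (hypothetical file and histogram names, following
# the folder layout used below):
#   f = TFile("data.root")
#   sigma, sigma_err, msg = fit_resolution(f,
#       "Pt300to1000_incut/balresp_AK5PFJetsCHSL1L2L3",
#       tag="data_", out_path="plots/", rebin=5)
#   print "sigma = %1.4f +- %1.4f %s" % (sigma, sigma_err, msg)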
def extrapolate_resolution(file, base_name, # e.g. "Pt300to1000_incut_var_CutSecondLeadingToZPt_XXX/balresp_AK5PFJetsCHSL1L2L3"
tag, out_path, var=[0.1, 0.2, 0.3, 0.35], gen_imbalance = 0.0, extr_method = "linear", uncorrelate=True):
"""extrapolate resolution over alpha
# extrapolates the resolution for a set of histograms which can be defined
# by the parameter 'base_name' and all belong to one Pt-Bin
# various types of extrapolation are available and can be selected via
# the parameter 'extr_method'
"""
c = TCanvas(base_name+"2", base_name+"2", 600, 600)
local_var = copy.deepcopy(var)
local_var.reverse()
graph = TGraphErrors(len(var))
# iterate through all variations of the cut and fit the resolutions
for x in local_var:
# get the fit value
folder_var = base_name.replace("XXX", str(x).replace(".", "_")) # 0.3 -> 0_3
reso = fit_resolution(file, folder_var, tag, out_path, rebin = 5) #srebin = (n == 0) + 4 # why?
print "Variation %1.2f has resolution %1.4f ± %1.4f %s" % (x, reso[0], reso[1], reso[2])
# uncorrelate errors for all but largest point (index 0)
yerr = reso[1]
if local_var.index(x) == 0:
x0 = x
y0 = reso[0]
y0err = reso[1]
if uncorrelate and (extr_method == "linear"):
if y0err <= yerr:
yerr = math.sqrt(yerr ** 2 - y0err ** 2)
n_fail[4] +=1
else:
#print "Error not uncorrelated"
n_fail[3] +=1
# add point and error to our graph
graph.SetPoint(local_var.index(x), x, reso[0])
graph.SetPointError(local_var.index(x), 0.0, yerr)
print " ---- ", extr_method
if extr_method == "linear":
fitFunc = TF1("fit1", "[0] + [1]*(x-[2])", 0, 2.0)
fitFunc.FixParameter(0, y0)
fitFunc.FixParameter(2, x0)
fitres = graph.Fit(fitFunc, "SQ")
m_fit_err = fitres.GetErrors()[1]
m_fit = fitres.GetParams()[1]
yex = fitFunc.Eval(0.0)
# add the first error and extrapolate the rest
yex_err = math.sqrt(y0err ** 2 + (m_fit_err * x0) ** 2)
# quadratic: implements the extrapolation method described in JME-11-011
elif extr_method == "quadratic":
if gen_imbalance < 0.001:
print "ERROR: gen_imbalance =", gen_imbalance, base_name
fit_formula = "sqrt([0]^2 + [1]^2 + 2*[1]*[2]*x + [2]^2*x^2)" # sqrt(a^2 + (b + c*x)^2)
fitFunc = TF1("fit1", fit_formula, 0, 0.4)
# fix the gen imbalance (parameter 1); do not reset it afterwards
fitFunc.FixParameter(1, gen_imbalance)
# start values for the free parameters
fitFunc.SetParameter(0, gen_imbalance)
fitFunc.SetParameter(2, 0.5)
fitres = graph.Fit(fitFunc, "SQ")
reco_res = fitres.GetParams()[0]
print "Complex Resolution fit. GenIntr:", gen_imbalance, " RecoRes:", reco_res
yex = reco_res
yex_err = fitres.GetErrors()[0]
else:
plotbase.fail("Method " + extr_method + " not supported")
if not fitres.IsValid():
plotbase.fail("AN ALPHA FIT FAILED!")
graph.SetMarkerColor(4)
graph.SetMarkerSize(1.5)
graph.SetMarkerStyle(21)
graph.Draw("ap")
print "Extrapolated resolution for", base_name, "is %1.4f ± %1.4f, reference: %1.4f ± %1.4f" % (yex, yex_err, y0, y0err)
base_name = base_name.replace("/", "_")
c.Print(out_path + tag + base_name + "_resolution_extrapolation.png")
if yex < 0.01:
print "\n\nWARNING", base_name, "\n\n"
n_fail[5]+=1
return (yex, yex_err)
# plots the resolution for one algorithm, over all available PtBins
def plot_resolution ( file,
base_name, # is "YYY_incut_var_CutSecondLeadingToZPt_XXX/balresp_AK5PFJetsCHSL1L2L3
tag,
opt,
algo,
correction,
ref_hist,
fit_method = "linear",
subtract_gen = None,
draw_ax = None,
drop_first_bin = 0, drop_last_bin = 0,
draw_plot = True ):
str_bins = getroot.binstrings(opt.bins)
str_bins = str_bins[drop_first_bin:len(str_bins) if drop_last_bin==0 else -drop_last_bin]
tmp_out_path = opt.out + "/tmp_resolution/"
plotbase.EnsurePathExists( tmp_out_path )
plot = getroot.Histo()
n_access = 0
for str_bin in str_bins:
print "BIN:", str_bin
hist_template = base_name.replace("YYY", str_bin)
if subtract_gen is not None:
gen_imb = subtract_gen.y[n_access]
else:
gen_imb = 0.0
if fit_method == "none":
reso = fit_resolution(file, hist_template, tag, tmp_out_path)
else:
reso = extrapolate_resolution( file, hist_template, tag, tmp_out_path,
extr_method = fit_method,
gen_imbalance = gen_imb )
#print hist_template, "results in extrapolation", extra_res
#print ref_hist.replace("YYY", str_bin)
hist_zpt = getroot.getobject(ref_hist.replace("YYY", str_bin), file)
plot.append(hist_zpt.GetMean(), True, reso[0], reso[1])
n_access += 1
if draw_plot:
draw_ax.errorbar(plot.x, plot.y, plot.yerr, fmt='o', capsize=2, label=tag)
return plot
# plots all resolutions ( MC Truth, MC Reco and Data Reco ) in one plot
def combined_resolution( files, opt,
folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX",
algo = "AK5PFJets",
corr = "L1L2L3",
method_name = "balresp",
filename_postfix = "",
subtract_gen = True,
drop_first = 0,
drop_last = 0):
f,ax = plotbase.newPlot()
plotbase.labels(ax, opt)
#plotbase.jetlabel(ax, algo, corr)
plotbase.axislabels(ax, 'zpt', 'resolution')
# construct names
plot_filename = "resolution_" + method_name + "_" + algo + corr + filename_postfix
hist_name = folder_template + "/" + method_name + "_" + algo + corr
hist_truth_name = "YYY_incut" + "/" + method_name + "_" + algo + corr
hist_z = "YYY_incut/zpt_" + algo + corr
s = "recogen".join(hist_truth_name.split(method_name))
print "Truth:", s
plot_resolution(files[1], s,
"MC_Truth",
opt,
algo,
corr,
hist_z,
"none",
draw_ax = ax,
drop_first_bin = drop_first, drop_last_bin = drop_last )
print "GEN"
# get the gen addition
gen_res = plot_resolution(files[1], "genbal".join(hist_truth_name.split(method_name)),
"MC_Instrinsic",
opt,
algo,
corr,
hist_z,
draw_ax = ax,
drop_first_bin = drop_first, drop_last_bin = drop_last, draw_plot = False )
print gen_res, "\n\n\n"
if subtract_gen:
title_postfix = "Reco"
else:
gen_res = None
title_postfix = "Total"
print "MC"
mc_res = plot_resolution( files[1], hist_name,
opt.labels[1] + " " + title_postfix,
opt,
algo,
corr,
hist_z,
fit_method = "quadratic",
subtract_gen = gen_res ,
draw_ax = ax,
drop_first_bin = drop_first, drop_last_bin = drop_last )
print "DATA"
data_res = plot_resolution( files[0], hist_name,
opt.labels[0] + " " + title_postfix,
opt,
algo,
corr,
hist_z,
fit_method = "quadratic",
subtract_gen = gen_res,
draw_ax = ax,
drop_first_bin = drop_first, drop_last_bin = drop_last )
ax.legend( frameon=True, numpoints=1 )
plotbase.Save(f, plot_filename, opt)
# plot ratio
ratio = getroot.Histo()
if 0 in mc_res.y:
print "WARNING, zero in MC resolution:", mc_res.y
for i in range(len(data_res) - 1):
ratio.append(data_res.x[i], True,
data_res.y[i] / mc_res.y[i],
abs(data_res.yerr[i] * (1.0 / mc_res.y[i])) +
abs(mc_res.yerr[i] * (data_res.y[i] / (mc_res.y[i] * mc_res.y[i])))
)
f, ax = plotbase.newPlot()
ax.errorbar(ratio.x, ratio.y, ratio.yerr, fmt='o', capsize=2 )
ax.axhline(1.0, color="black", linestyle='--')
plotbase.labels(ax, opt)
#plotbase.jetlabel(ax, algo, corr)
plotbase.axislabels(ax, 'zpt', 'resolutionratio')
plotbase.Save(f,plot_filename + "_ratio", opt)
print "1st: %d, 2nd %d, sigma: %d, uncorr: %d, corr: %d, extrapol: %d" % tuple(n_fail)
def resolution(files, opt):
# balance
#combined_resolution(files, opt,
# folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX",
# algo = "AK5PFJets",
# corr = "L1L2L3",
# method_name = "balresp",
# filename_postfix = "_plus_gen",
#    subtract_gen = False )
#print ">>>>>>>>>>>>>>>>>>>combined1"
combined_resolution(files, opt,
folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX",
algo = "AK5PFJetsCHS",
corr = "L1L2L3Res",
method_name = "balresp",
filename_postfix = "_plus_gen",
subtract_gen = False )
"""combined_resolution(files, opt,
folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX",
algo = "AK5PFJets",
corr = "L1L2L3",
method_name = "balresp",
filename_postfix = "",
subtract_gen = True,
drop_first = 2, drop_last = 1)"""
print ">>>>>>>>>>>>>>>>>>>combined2"
combined_resolution(files, opt,
folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX",
algo = "AK5PFJetsCHS",
corr = "L1L2L3Res",
method_name = "balresp",
filename_postfix = "",
subtract_gen = True,
drop_first = 0, drop_last = 0)
# mpf >> not in gen right now
# combined_resolution( files, opt,
# folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX",
# algo = "AK5PFJets",
# corr = "L1L2L3",
# method_name = "mpfresp",
# filename_postfix = "",
# subtract_gen = False )
# PU related -- problematic due to low statistics
#combined_resolution( files, opt,
# folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX_var_Npv_0to2",
# algo = "AK5PFJets",
# corr = "L1L2L3",
# method_name = "balresp",
# filename_postfix = "npv_0_2" )
#combined_resolution( files, opt,
# folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX_var_Npv_3to5",
# algo = "AK5PFJets",
# corr = "L1L2L3",
# method_name = "balresp",
# filename_postfix = "npv_3_5" )
#combined_resolution( files, opt,
# folder_template = "YYY_incut_var_CutSecondLeadingToZPt_XXX_var_Npv_6to11",
# algo = "AK5PFJets",
# corr = "L1L2L3",
# method_name = "balresp",
# filename_postfix = "npv_6_11" )
plots = ['resolution']
| dhaitz/CalibFW | plotting/modules/plot_resolution.py | Python | gpl-2.0 | 15,305 | ["Gaussian"] | b92e8b2e8b5d17e0f9dfb325d47fd6511fe46f1ae4e2f2a91d4de344a2f44d06 |
# Author: Matthew Harrigan <matthew.harrigan@outlook.com>
# Contributors:
# Copyright (c) 2016, Stanford University
# All rights reserved.
from __future__ import print_function, division, absolute_import
import os
import pickle
import re
import shutil
import stat
import warnings
import mdtraj as md
import numpy as np
import pandas as pd
from jinja2 import Environment, PackageLoader
__all__ = ['backup', 'preload_top', 'preload_tops', 'load_meta', 'load_generic',
'load_trajs', 'save_meta', 'render_meta', 'save_generic',
'itertrajs', 'save_trajs', 'ProjectTemplate']
class BackupWarning(UserWarning):
pass
def backup(fn):
"""If ``fn`` exists, rename it and issue a warning
This function will rename an existing filename {fn}.bak.{i} where
i is the smallest integer that gives a filename that doesn't exist.
This naively uses a while loop to find such a filename, so there
shouldn't be too many existing backups or performance will degrade.
Parameters
----------
fn : str
The filename to check.
"""
if not os.path.exists(fn):
return
backnum = 1
backfmt = "{fn}.bak.{backnum}"
trial_fn = backfmt.format(fn=fn, backnum=backnum)
while os.path.exists(trial_fn):
backnum += 1
trial_fn = backfmt.format(fn=fn, backnum=backnum)
warnings.warn("{fn} exists. Moving it to {newfn}"
.format(fn=fn, newfn=trial_fn),
BackupWarning)
shutil.move(fn, trial_fn)
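# Usage sketch (hypothetical filename): each call to backup() moves an
# existing file out of the way before it is overwritten.
#   backup('meta.pandas.pickl')   # meta.pandas.pickl -> meta.pandas.pickl.bak.1
#   backup('meta.pandas.pickl')   # a later existing copy -> .bak.2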
def chmod_plus_x(fn):
st = os.stat(fn)
os.chmod(fn, st.st_mode | stat.S_IEXEC)
def default_key_to_path(key, dfmt="{}", ffmt="{}.npy"):
"""Turn an arbitrary python object into a filename
This uses string formatting, so make sure your keys map
to unique strings. If the key is a tuple, it will join each
element of the tuple with '/', resulting in a filesystem
hierarchy of files.
"""
if isinstance(key, tuple):
paths = [dfmt.format(k) for k in key[:-1]]
paths += [ffmt.format(key[-1])]
return os.path.join(*paths)
else:
return ffmt.format(key)
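# Doctest-style sketch of the key-to-path mapping:
#   default_key_to_path(('run1', 'clone2', 7))  ->  'run1/clone2/7.npy'
#   default_key_to_path(42)                     ->  '42.npy'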
def validate_keys(keys, key_to_path_func=None,
valid_re=r"[a-zA-Z0-9_\-\.]+(\/[a-zA-Z0-9_\-\.]+)*"):
if key_to_path_func is None:
key_to_path_func = default_key_to_path
err = "Key must match regular expression {}".format(valid_re)
for k in keys:
ks = key_to_path_func(k)
assert isinstance(ks, str), "Key must convert to a string"
assert re.match(valid_re, ks), err
def preload_tops(meta):
"""Load all topology files into memory.
This might save some performance compared to re-parsing the topology
file for each trajectory you try to load in. Typically, you have far
fewer (possibly 1) topologies than trajectories
Parameters
----------
meta : pd.DataFrame
The DataFrame of metadata with a column named 'top_fn'
Returns
-------
tops : dict
Dictionary of ``md.Topology`` objects, keyed by "top_fn"
values.
"""
top_fns = set(meta['top_fn'])
tops = {}
for tfn in top_fns:
tops[tfn] = md.load_topology(tfn)
return tops
def preload_top(meta):
"""Load one topology file into memory.
This function checks to make sure there's only one topology file
in play. When sampling frames, all trajectories must share the same
topology so they can be concatenated.
Parameters
----------
meta : pd.DataFrame
The DataFrame of metadata with a column named 'top_fn'
Returns
-------
top : md.Topology
The one topology file that can be used for all trajectories.
"""
top_fns = set(meta['top_fn'])
if len(top_fns) != 1:
raise ValueError("More than one topology is used in this project!")
return md.load_topology(top_fns.pop())
def itertrajs(meta, stride=1):
"""Load one mdtraj trajectory at a time and yield it.
MDTraj does striding badly: it reads in the whole trajectory and
then performs the stride. We use md.join(md.iterload(...)) to conserve memory.
"""
tops = preload_tops(meta)
for i, row in meta.iterrows():
yield i, md.join(md.iterload(row['traj_fn'],
top=tops[row['top_fn']],
stride=stride),
discard_overlapping_frames=False,
check_topology=False)
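# Usage sketch (assumes `meta` has 'traj_fn' and 'top_fn' columns, as made
# by the project templates):
#   meta = load_meta()
#   for key, traj in itertrajs(meta, stride=10):
#       print(key, traj.n_frames)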
def load_meta(meta_fn='meta.pandas.pickl'):
"""Load metadata associated with a project.
Parameters
----------
meta_fn : str
The filename
Returns
-------
meta : pd.DataFrame
Pandas DataFrame where each row contains metadata for a
trajectory.
"""
return pd.read_pickle(meta_fn)
def save_meta(meta, meta_fn='meta.pandas.pickl'):
"""Save metadata associated with a project.
Parameters
----------
meta : pd.DataFrame
The DataFrame of metadata
meta_fn : str
The filename
"""
backup(meta_fn)
pd.to_pickle(meta, meta_fn)
def render_meta(meta, fn="meta.pandas.html",
title="Project Metadata - MSMBuilder", pandas_kwargs=None):
"""Render a metadata dataframe as an html webpage for inspection.
Parameters
----------
meta : pd.Dataframe
The DataFrame of metadata
fn : str
Output filename (should end in html)
title : str
Page title
pandas_kwargs : dict
Arguments to be passed to pandas
"""
if pandas_kwargs is None:
pandas_kwargs = {}
kwargs_with_defaults = {
'classes': ('table', 'table-condensed', 'table-hover'),
}
kwargs_with_defaults.update(**pandas_kwargs)
env = Environment(loader=PackageLoader('msmbuilder', 'io_templates'))
templ = env.get_template("twitter-bootstrap.html")
rendered = templ.render(
title=title,
content=meta.to_html(**kwargs_with_defaults)
)
# Ugh, pandas hardcodes border="1"
rendered = re.sub(r' border="1"', '', rendered)
backup(fn)
with open(fn, 'w') as f:
f.write(rendered)
def save_generic(obj, fn):
"""Save Python objects, including msmbuilder Estimators.
This is a convenience wrapper around Python's ``pickle``
serialization scheme. This protocol is backwards-compatible
among Python versions, but may not be "forwards-compatible".
A file saved with Python 3 cannot be opened under Python 2.
Please read the pickle docs (specifically related to the ``protocol``
parameter) to specify broader compatibility.
If a file already exists at the given filename, it will be backed
up.
Parameters
----------
obj : object
A Python object to serialize (save to disk)
fn : str
Filename to save the object. We recommend using the '.pickl'
extension, but don't do anything to enforce that convention.
"""
backup(fn)
with open(fn, 'wb') as f:
pickle.dump(obj, f)
def load_generic(fn):
"""Load Python objects, including msmbuilder Estimators.
This is a convenience wrapper around Python's ``pickle``
serialization scheme.
Parameters
----------
fn : str
Load this file
Returns
-------
object : object
The object.
"""
with open(fn, 'rb') as f:
return pickle.load(f)
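# Round-trip sketch (hypothetical filename and object):
#   save_generic({'tica_lag': 10}, 'params.pickl')  # backs up any old file
#   params = load_generic('params.pickl')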
def save_trajs(trajs, fn, meta, key_to_path=None):
"""Save trajectory-like data
Data is stored in individual numpy binary files in the
directory given by ``fn``.
This method will automatically back up existing files named ``fn``.
Parameters
----------
trajs : dict of (key, np.ndarray)
Dictionary of trajectory-like ndarray's keyed on ``meta.index``
values.
fn : str
Where to save the data. This will be a directory containing
one file per trajectory
meta : pd.DataFrame
The DataFrame of metadata
"""
if key_to_path is None:
key_to_path = default_key_to_path
validate_keys(meta.index, key_to_path)
backup(fn)
os.mkdir(fn)
for k in meta.index:
v = trajs[k]
npy_fn = os.path.join(fn, key_to_path(k))
os.makedirs(os.path.dirname(npy_fn), exist_ok=True)
np.save(npy_fn, v)
def load_trajs(fn, meta='meta.pandas.pickl', key_to_path=None):
"""Load trajectory-like data
Data is expected to be stored as if saved by ``save_trajs``.
This method finds trajectories based on the ``meta`` dataframe.
If you remove a file (trajectory) from disk, be sure to remove
its row from the dataframe. If you remove a row from the dataframe,
be aware that that trajectory (file) will not be loaded, even if
it exists on disk.
Parameters
----------
fn : str
Where the data is saved. This should be a directory containing
one file per trajectory.
meta : pd.DataFrame or str
The DataFrame of metadata. If this is a string, it is interpreted
as a filename and the dataframe is loaded from disk.
Returns
-------
meta : pd.DataFrame
The DataFrame of metadata. If you passed in a string (filename)
to the ``meta`` input, this will be the loaded DataFrame. If
you gave a DataFrame object, this will just be a reference back
to that object
trajs : dict
Dictionary of trajectory-like np.ndarray's keyed on the values
of ``meta.index``.
"""
if key_to_path is None:
key_to_path = default_key_to_path
if isinstance(meta, str):
meta = load_meta(meta_fn=meta)
trajs = {}
for k in meta.index:
trajs[k] = np.load(os.path.join(fn, key_to_path(k)))
return meta, trajs
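# Round-trip sketch for trajectory-like data (assumes `meta` is the project
# metadata and `ttrajs` is a dict of ndarrays keyed on meta.index):
#   save_trajs(ttrajs, 'ttrajs', meta)
#   meta, ttrajs = load_trajs('ttrajs', meta)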
| mpharrigan/mixtape | msmbuilder/io/io.py | Python | lgpl-2.1 | 9,747 | ["MDTraj"] | d8bb2d16f4340c33e691055c5ba1d4961ffa5f7d12cb3571792b1caf5574f135 |
from dopamine.trpo import *
from dopamine.discworld import *
# Create a discworld instance.
env = DiscWorld()
# Create the policy model.
layers = []
layers.append({'input_dim': env.D, 'units': 100})
# layers.append({'units': 100, 'activation': 'relu'})
# layers.append({'units': 100, 'activation': 'relu'})
layers.append({'units': env.nb_actions, 'activation': 'tanh'})
policy = create_mlp(layers)
# Use Gaussian continuous action vectors.
pdf = DiagGaussian(env.nb_actions, stddev=1.0)
# So we have our three necessary objects! Let's create a TRPO agent.
agent = TRPOAgent(env, policy, pdf)
agent.learn()
| lightscalar/dopamine | scripts/discworld_experiments.py | Python | mit | 612 | ["Gaussian"] | f9b606802dc1c049a7b66fcfe1939ada54481d12de8f244cf25bee2dbb38c399 |
import gevent
import json
import unittest2
import base64
import os
import tempfile
import urllib2
from psdash.run import PsDashRunner
try:
import httplib
except ImportError:
# support for python 3
import http.client as httplib
class TestBasicAuth(unittest2.TestCase):
default_username = 'tester'
default_password = 'secret'
def setUp(self):
self.app = PsDashRunner().app
self.client = self.app.test_client()
def _enable_basic_auth(self, username, password):
self.app.config['PSDASH_AUTH_USERNAME'] = username
self.app.config['PSDASH_AUTH_PASSWORD'] = password
def _create_auth_headers(self, username, password):
data = base64.b64encode(':'.join([username, password]))
headers = [('Authorization', 'Basic %s' % data)]
return headers
def test_missing_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
resp = self.client.get('/')
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_correct_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
headers = self._create_auth_headers(self.default_username, self.default_password)
resp = self.client.get('/', headers=headers)
self.assertEqual(resp.status_code, httplib.OK)
def test_incorrect_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
headers = self._create_auth_headers(self.default_username, 'wrongpass')
resp = self.client.get('/', headers=headers)
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
class TestAllowedRemoteAddresses(unittest2.TestCase):
def test_correct_remote_address(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
def test_incorrect_remote_address(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_multiple_remote_addresses(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1, 10.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.124.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_multiple_remote_addresses_using_list(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': ['127.0.0.1', '10.0.0.1']})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.124.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
class TestEnvironmentWhitelist(unittest2.TestCase):
def test_show_only_whitelisted(self):
r = PsDashRunner({'PSDASH_ENVIRON_WHITELIST': ['USER']})
resp = r.app.test_client().get('/process/%d/environment' % os.getpid())
self.assertTrue(os.environ['USER'] in resp.data)
self.assertTrue('*hidden by whitelist*' in resp.data)
class TestUrlPrefix(unittest2.TestCase):
def setUp(self):
self.default_prefix = '/subfolder/'
def test_page_not_found_on_root(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': self.default_prefix})
resp = r.app.test_client().get('/')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_works_on_prefix(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': self.default_prefix})
resp = r.app.test_client().get(self.default_prefix)
self.assertEqual(resp.status_code, httplib.OK)
def test_multiple_level_prefix(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': '/use/this/folder/'})
resp = r.app.test_client().get('/use/this/folder/')
self.assertEqual(resp.status_code, httplib.OK)
def test_missing_starting_slash_works(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': 'subfolder/'})
resp = r.app.test_client().get('/subfolder/')
self.assertEqual(resp.status_code, httplib.OK)
def test_missing_trailing_slash_works(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': '/subfolder'})
resp = r.app.test_client().get('/subfolder/')
self.assertEqual(resp.status_code, httplib.OK)
class TestHttps(unittest2.TestCase):
def _run(self, https=False):
options = {'PSDASH_PORT': 5051}
if https:
options.update({
'PSDASH_HTTPS_KEYFILE': os.path.join(os.path.dirname(__file__), 'keyfile'),
'PSDASH_HTTPS_CERTFILE': os.path.join(os.path.dirname(__file__), 'cacert.pem')
})
self.r = PsDashRunner(options)
self.runner = gevent.spawn(self.r.run)
gevent.sleep(0.3)
def tearDown(self):
self.r.server.close()
self.runner.kill()
gevent.sleep(0.3)
def test_https_dont_work_without_certs(self):
self._run()
self.assertRaises(urllib2.URLError, urllib2.urlopen, 'https://127.0.0.1:5051')
def test_https_works_with_certs(self):
self._run(https=True)
resp = urllib2.urlopen('https://127.0.0.1:5051')
self.assertEqual(resp.getcode(), httplib.OK)
class TestEndpoints(unittest2.TestCase):
def setUp(self):
self.r = PsDashRunner()
self.app = self.r.app
self.client = self.app.test_client()
self.pid = os.getpid()
self.r.get_local_node().net_io_counters.update()
def test_index(self):
resp = self.client.get('/')
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf('TRAVIS' in os.environ, 'Functionality not supported on Travis CI')
def test_disks(self):
resp = self.client.get('/disks')
self.assertEqual(resp.status_code, httplib.OK)
def test_network(self):
resp = self.client.get('/network')
self.assertEqual(resp.status_code, httplib.OK)
def test_processes(self):
resp = self.client.get('/processes')
self.assertEqual(resp.status_code, httplib.OK)
def test_process_overview(self):
resp = self.client.get('/process/%d' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf(os.environ.get('USER') == 'root', 'It would fail as root as we would have access to pid 1')
def test_process_no_access(self):
resp = self.client.get('/process/1') # pid 1 == init
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_process_non_existing_pid(self):
resp = self.client.get('/process/0')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_process_children(self):
resp = self.client.get('/process/%d/children' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_connections(self):
resp = self.client.get('/process/%d/connections' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_environment(self):
resp = self.client.get('/process/%d/environment' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_files(self):
resp = self.client.get('/process/%d/files' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_threads(self):
resp = self.client.get('/process/%d/threads' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_memory(self):
resp = self.client.get('/process/%d/memory' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf('TRAVIS' in os.environ, 'Functionality not supported on Travis CI')
def test_process_limits(self):
resp = self.client.get('/process/%d/limits' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_invalid_section(self):
resp = self.client.get('/process/%d/whatnot' % self.pid)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_non_existing(self):
resp = self.client.get('/prettywronghuh')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_connection_filters(self):
resp = self.client.get('/network?laddr=127.0.0.1')
self.assertEqual(resp.status_code, httplib.OK)
def test_register_node(self):
resp = self.client.get('/register?name=examplehost&port=500')
self.assertEqual(resp.status_code, httplib.OK)
def test_register_node_all_params_required(self):
resp = self.client.get('/register?name=examplehost')
self.assertEqual(resp.status_code, httplib.BAD_REQUEST)
resp = self.client.get('/register?port=500')
self.assertEqual(resp.status_code, httplib.BAD_REQUEST)
class TestLogs(unittest2.TestCase):
def _create_log_file(self):
fd, filename = tempfile.mkstemp()
fp = os.fdopen(fd, 'w')
fp.write('woha\n' * 100)
fp.write('something\n')
fp.write('woha\n' * 100)
fp.flush()
return filename
def setUp(self):
self.r = PsDashRunner()
self.app = self.r.app
self.client = self.app.test_client()
self.filename = self._create_log_file()
self.r.get_local_node().logs.add_available(self.filename)
def test_logs(self):
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_logs_removed_file(self):
filename = self._create_log_file()
self.r.get_local_node().logs.add_available(filename)
# first visit to make sure the logs are properly initialized
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
os.unlink(filename)
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_logs_removed_file_uninitialized(self):
filename = self._create_log_file()
self.r.get_local_node().logs.add_available(filename)
os.unlink(filename)
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_view(self):
resp = self.client.get('/log?filename=%s' % self.filename)
self.assertEqual(resp.status_code, httplib.OK)
def test_search(self):
resp = self.client.get('/log/search?filename=%s&text=%s' % (self.filename, 'something'),
environ_overrides={'HTTP_X_REQUESTED_WITH': 'xmlhttprequest'})
self.assertEqual(resp.status_code, httplib.OK)
try:
data = json.loads(resp.data)
self.assertIn('something', data['content'])
except ValueError:
self.fail('Log search did not return valid json data')
def test_read(self):
resp = self.client.get('/log?filename=%s' % self.filename,
environ_overrides={'HTTP_X_REQUESTED_WITH': 'xmlhttprequest'})
self.assertEqual(resp.status_code, httplib.OK)
def test_read_tail(self):
resp = self.client.get('/log?filename=%s&seek_tail=1' % self.filename)
self.assertEqual(resp.status_code, httplib.OK)
def test_non_existing_file(self):
filename = "/var/log/surelynotaroundright.log"
resp = self.client.get('/log?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/search?filename=%s&text=%s' % (filename, 'something'))
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/read?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/read_tail?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
if __name__ == '__main__':
unittest2.main()
| uaomer/psdash | tests/test_web.py | Python | cc0-1.0 | 12,633 | ["VisIt"] | 94d838183a12e2959c0d794f09b622220b311e978faf686eb96f6140f638ee8e |
# Copyright 2013 Pau Haro Negre
# based on C++ code by Carl Staelin Copyright 2009-2011
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import numpy as np
import random
class Synapse(namedtuple('Synapse', ['offset', 'delay'])):
"""A Synapse represents a connection between the neuron's input dendrites
and the output axons of other neurons.
Attributes:
offset: Identifies a synapse of the neuron.
delay: Represents the time the signal takes to traverse the axon to
reach the synapse. Takes a value in range(D1).
"""
pass
class Word(object):
"""An input Word represents the input signals to the neuron for a time
period.
A Word contains a list of those input synapses that fired for the
most recent given excitation pattern.
Attributes:
synapses: A set of pairs containing the synapses that fired and the
associated delay.
"""
def __init__(self, fired_syn=()):  # avoid a mutable default argument
"""Inits Word class.
Args:
fired_syn: List of pairs of input synapses that fired and
associated delays. Can only contain positive synapse offset
values.
"""
if len(fired_syn) > 0 and sorted(fired_syn)[0][0] < 0:
raise ValueError('synapse offset values have to be positive')
self.synapses = [Synapse(*s) for s in fired_syn]
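# Construction sketch (hypothetical values): a Word in which synapses 3 and
# 17 fired, with delays 1 and 0 respectively.
#   w = Word([(3, 1), (17, 0)])
#   w.synapses  ->  [Synapse(offset=3, delay=1), Synapse(offset=17, delay=0)]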
class WordSet(object):
"""An array of Words.
Wordset is simply an array of Word instances, which may also store
information regarding the delay slot learned for the word during training.
Attributes:
words: Array of Word instances.
delays: Delay slots learned for each word during training.
"""
def __init__(self, num_words, word_length, num_delays, num_active=None,
refractory_period=None):
"""Inits WordSet class.
Args:
num_words: Number of Words to initialize the WordSet with.
word_length: Number of synapses in a Word.
num_delays: Number of delay slots.
num_active: Number of active synapses per word.
refractory_period: Average number of different patterns presented
before a given neuron fires.
"""
# Distribution of the number of active synapses per word?
if num_active is not None:
# fixed: N
N_array = np.empty(num_words, int)
N_array.fill(num_active)
else:
# binomial: B(S0, R/S0)
R_S0 = refractory_period/float(word_length) # R/S0
N_array = np.random.binomial(word_length, R_S0, num_words)
# Generate the set of words and set delays to 0
synapses = range(word_length)
self.words = [Word(zip(
random.sample(synapses, N), # active synapses
np.random.randint(num_delays, size=N))) # active delays
for N in N_array]
self.delays = [0] * num_words
class Neuron(object):
"""Models a CE neuron.
Attributes:
S0: Number of synapses.
H: Number of synapses needed to fire a neuron.
G: Ratio of strong synapse strength to weak synapse strength, binary
approximation.
C: Number of dendrite compartments capable of firing independently.
D1: Number of possible time slots where neurons can produce spikes.
D2: Number of different time delays available between two neural
layers.
synapses: Represents a connection between the neuron's input dendrites
and the output axons of other neurons. Each row of the array
contains 3 fields:
- strength: Strength of the synapse.
- delay: Represents the time the signal takes to traverse the axon
to reach the synapse. Takes a value in range(D2).
- container: The dendrite compartment of this synapse.
training: whether the neuron is in training mode.
"""
def __init__(self, S0 = 200, H = 5.0, G = 2.0, C = 1, D1 = 4, D2 = 7):
"""Inits Neuron class.
Args:
S0: Number of synapses.
H: Number of synapses needed to fire a neuron.
G: Ratio of strong synapse strength to weak synapse strength,
binary approximation.
C: Number of dendrite compartments capable of firing independently.
D1: Number of possible time slots where neurons can produce spikes.
D2: Number of different time delays available between two neural
layers.
"""
self.S0 = S0
self.H = H
self.G = G
self.C = C
self.D1 = D1
self.D2 = D2
self.training = False
self.synapses = np.zeros(S0, dtype='float32,uint16,uint16')
self.synapses.dtype.names = ('strength', 'delay', 'container')
self.synapses['strength'] = 1.0
self.synapses['delay'] = np.random.randint(D2, size=S0)
self.synapses['container'] = np.random.randint(C, size=S0)
def expose(self, w):
"""Models how the neuron reacts to excitation patterns, and how it
computes whether or not to fire.
Expose computes the weighted sum of the input word, and the neuron fires
if that sum meets or exceeds a threshold. The weighted sum is the sum of
the S0 element-by-element products of the most recent neuron vector and
the current word.
Args:
w: A Word to present to the neuron.
Returns:
A 3-element tuple containing:
0. A Boolean indicating whether the neuron fired or not.
1. The delay in which the neuron has fired.
2. The container where the firing occurred.
"""
offsets = [syn.offset for syn in w.synapses]
delays = [syn.delay for syn in w.synapses]
synapses = self.synapses[offsets]
# Iterate over delays until neuron fires
for d in range(self.D1 + self.D2):
delay_indices = (synapses['delay'] + delays) == d
# Compute the weighted sum of the firing inputs for each container
for c in range(self.C):
container_indices = synapses['container'] == c
indices = delay_indices & container_indices
s = synapses['strength'][indices].sum()
# Check if the container has fired
if (self.training and s >= self.H) or s >= self.H*self.G:
return (True, d, c)
# If no container has fired for any delay
return (False, None, None)
def train(self, w):
"""Trains a neuron with an input word.
To train a neuron, "train" is called for each word to be recognized. If
the neuron fires for that word then all synapses that contributed to
that firing have their strengths irreversibly increased to G.
Args:
w: A Word to train the neuron with.
Returns:
A Boolean indicating whether the neuron fired or not.
"""
if not self.training:
print "[WARN] train(w) was called when not in training mode."
return False
fired, delay, container = self.expose(w)
if not fired: return False
# Update the synapses that contributed to the firing
offsets = [s.offset for s in w.synapses]
delays = [syn.delay for syn in w.synapses]
synapses = self.synapses[offsets]
delay_indices = (synapses['delay'] + delays) == delay
container_indices = synapses['container'] == container
active_indices = delay_indices & container_indices
indices = np.zeros(self.S0, dtype=bool)
indices[offsets] = active_indices
self.synapses['strength'][indices] = self.G
return True
def start_training(self):
"""Set the neuron in training mode.
"""
self.training = True
def finish_training(self):
"""Set the neuron in recognition mode.
Once the training is complete, the neuron's threshold value H is set
to H*G.
"""
self.training = False
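# A minimal end-to-end sketch (hypothetical parameter choices): train a
# neuron on a random word set, then count how many of those words it
# recognizes afterwards.
if __name__ == "__main__":
    words = WordSet(num_words=20, word_length=200, num_delays=4, num_active=10)
    neuron = Neuron(S0=200, H=5.0, G=2.0, C=1, D1=4, D2=7)
    neuron.start_training()
    trained = sum(neuron.train(w) for w in words.words)
    neuron.finish_training()
    recognized = sum(neuron.expose(w)[0] for w in words.words)
    print "trained on %d/20 words, recognized %d/20" % (trained, recognized)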
| pauh/neuron | cognon_extended.py | Python | apache-2.0 | 8,867 | ["NEURON"] | affd9d9cbde0edfe0c639f13cdf03686860d504d98315849c0de0f03d7cc8329 |
"""NFW profiles for shear and magnification.
Surface mass density and differential surface mass density calculations
for NFW dark matter halos, with and without the effects of miscentering
offsets.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from astropy import units
from scipy.integrate import simps
from clusterlensing import utils
def _set_dimensionless_radius(self, radii=None, integration=False):
if radii is None:
radii = self._rbins # default radii
# calculate x = radii / rs
if integration is True:
# radii is a 3D matrix (r_eq13 <-> numTh,numRoff,numRbins)
d0, d1, d2 = radii.shape[0], radii.shape[1], radii.shape[2]
# with each cluster's rs, we now have a 4D matrix:
radii_4D = radii.reshape(d0, d1, d2, 1)
rs_4D = self._rs.reshape(1, 1, 1, self._nlens)
x = radii_4D / rs_4D
else:
# 1D array of radii and clusters, reshape & broadcast
radii_repeated = radii.reshape(1, self._nbins)
rs_repeated = self._rs.reshape(self._nlens, 1)
x = radii_repeated / rs_repeated
x = x.value
if 0. in x:
x[np.where(x == 0.)] = 1.e-10 # hack to avoid infs in sigma
# dimensionless radius
self._x = x
# set the 3 cases of dimensionless radius x
self._x_small = np.where(self._x < 1. - 1.e-6)
self._x_big = np.where(self._x > 1. + 1.e-6)
self._x_one = np.where(np.abs(self._x - 1) <= 1.e-6)
class SurfaceMassDensity(object):
"""Calculate NFW profiles for Sigma and Delta-Sigma.
Parameters
----------
rs : array_like
Scale radii (in Mpc) for every halo. Should be 1D, optionally with
astropy.units of Mpc.
delta_c : array_like
Characteristic overdensities for every halo. Should be 1D and have
the same length as rs.
rho_crit : array_like
Critical energy density of the universe (in Msun/Mpc/pc^2) at every
halo z. Should be 1D, optionally with astropy.units of
Msun/Mpc/(pc**2), and have the same length as rs.
offsets : array_like, optional
Width of the Gaussian distribution of miscentering offsets (in
Mpc), for every cluster halo. Should be 1D, optionally with
astropy.units of Mpc, and have the same length as rs. (Note: it is
common to use the same value for every halo, implying that they are
drawn from the same offset distribution).
rbins : array_like, optional
Radial bins (in Mpc) at which profiles will be calculated. Should
be 1D, optionally with astropy.units of Mpc.
Other Parameters
-------------------
numTh : int, optional
Number of bins to use for integration over theta, for calculating
offset profiles (no effect for offsets=None). Default 200.
numRoff : int, optional
Number of bins to use for integration over R_off, for calculating
offset profiles (no effect for offsets=None). Default 200.
numRinner : int, optional
Number of bins at r < min(rbins) to use for integration over
Sigma(<r), for calculating DeltaSigma (no effect for Sigma ever,
and no effect for DeltaSigma if offsets=None). Default 20.
factorRouter : int, optional
Factor increase over number of rbins, at min(r) < r < max(r), of
bins that will be used for integration over Sigma(<r), for
calculating DeltaSigma (no effect for Sigma, and no effect for
DeltaSigma if offsets=None). Default 3.
Methods
----------
sigma_nfw()
Calculate surface mass density Sigma.
deltasigma_nfw()
Calculate differential surface mass density DeltaSigma.
See Also
----------
ClusterEnsemble : Parameters and profiles for a sample of clusters.
This class provides an interface to SurfaceMassDensity, and tracks
a DataFrame of parameters as well as nfw profiles for many clusters
at once, only requiring the user to specify cluster z and richness,
at a minimum.
References
----------
Sigma and DeltaSigma are calculated using the formulas given in:
C.O. Wright and T.G. Brainerd, "Gravitational Lensing by NFW Halos,"
The Astrophysical Journal, Volume 534, Issue 1, pp. 34-40 (2000).
The offset profiles are calculated using formulas given, e.g., in
Equations 11-15 of:
J. Ford, L. Van Waerbeke, M. Milkeraitis, et al., "CFHTLenS: a weak
lensing shear analysis of the 3D-Matched-Filter galaxy clusters,"
Monthly Notices of the Royal Astronomical Society, Volume 447, Issue 2,
p.1304-1318 (2015).
"""
def __init__(self, rs, delta_c, rho_crit, offsets=None, rbins=None,
numTh=200, numRoff=200, numRinner=20, factorRouter=3):
if rbins is None:
rmin, rmax = 0.1, 5.
rbins = np.logspace(np.log10(rmin), np.log10(rmax), num=50)
self._rbins = rbins * units.Mpc
else:
# check rbins input units & type
self._rbins = utils.check_units_and_type(rbins, units.Mpc)
# check input units & types
self._rs = utils.check_units_and_type(rs, units.Mpc)
self._delta_c = utils.check_units_and_type(delta_c, None)
self._rho_crit = utils.check_units_and_type(rho_crit, units.Msun /
units.Mpc / (units.pc**2))
self._numRoff = utils.check_units_and_type(numRoff, None,
is_scalar=True)
self._numTh = utils.check_units_and_type(numTh, None, is_scalar=True)
self._numRinner = utils.check_units_and_type(numRinner, None,
is_scalar=True)
self._factorRouter = utils.check_units_and_type(factorRouter, None,
is_scalar=True)
# check numbers of bins are all positive
if (numRoff <= 0) or (numTh <= 0) or (numRinner <= 0) or \
(factorRouter <= 0):
raise ValueError('Require numbers of bins > 0')
self._nbins = self._rbins.shape[0]
self._nlens = self._rs.shape[0]
if offsets is not None:
self._sigmaoffset = utils.check_units_and_type(offsets, units.Mpc)
utils.check_input_size(self._sigmaoffset, self._nlens)
else:
self._sigmaoffset = offsets
# check array sizes are compatible
utils.check_input_size(self._rs, self._nlens)
utils.check_input_size(self._delta_c, self._nlens)
utils.check_input_size(self._rho_crit, self._nlens)
utils.check_input_size(self._rbins, self._nbins)
rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit
self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,
1).repeat(self._nbins, 1)
# set self._x, self._x_big, self._x_small, self._x_one
_set_dimensionless_radius(self)
def sigma_nfw(self):
"""Calculate NFW surface mass density profile.
Generate the surface mass density profiles of each cluster halo,
assuming a spherical NFW model. Optionally includes the effect of
cluster miscentering offsets, if the parent object was initialized
with offsets.
Returns
----------
Quantity
Surface mass density profiles (ndarray, in astropy.units of
Msun/pc/pc). Each row corresponds to a single cluster halo.
"""
def _centered_sigma(self):
# perfectly centered cluster case
# calculate f
bigF = np.zeros_like(self._x)
f = np.zeros_like(self._x)
numerator_arg = ((1. / self._x[self._x_small]) +
np.sqrt((1. / (self._x[self._x_small]**2)) - 1.))
denominator = np.sqrt(1. - (self._x[self._x_small]**2))
bigF[self._x_small] = np.log(numerator_arg) / denominator
bigF[self._x_big] = (np.arccos(1. / self._x[self._x_big]) /
np.sqrt(self._x[self._x_big]**2 - 1.))
f = (1. - bigF) / (self._x**2 - 1.)
f[self._x_one] = 1. / 3.
if np.isnan(np.sum(f)) or np.isinf(np.sum(f)):
print('\nERROR: f is not all real\n')
# calculate & return centered profiles
if f.ndim == 2:
sigma = 2. * self._rs_dc_rcrit * f
else:
rs_dc_rcrit_4D = self._rs_dc_rcrit.T.reshape(1, 1,
f.shape[2],
f.shape[3])
sigma = 2. * rs_dc_rcrit_4D * f
return sigma
def _offset_sigma(self):
# size of "x" arrays to integrate over
numRoff = self._numRoff
numTh = self._numTh
numRbins = self._nbins
maxsig = self._sigmaoffset.value.max()
# inner/outer bin edges
roff_1D = np.linspace(0., 4. * maxsig, numRoff)
theta_1D = np.linspace(0., 2. * np.pi, numTh)
rMpc_1D = self._rbins.value
# reshape for broadcasting: (numTh,numRoff,numRbins)
theta = theta_1D.reshape(numTh, 1, 1)
roff = roff_1D.reshape(1, numRoff, 1)
rMpc = rMpc_1D.reshape(1, 1, numRbins)
r_eq13 = np.sqrt(rMpc ** 2 + roff ** 2 -
2. * rMpc * roff * np.cos(theta))
# 3D array r_eq13 -> 4D dimensionless radius (nlens)
_set_dimensionless_radius(self, radii=r_eq13, integration=True)
sigma = _centered_sigma(self)
inner_integrand = sigma.value / (2. * np.pi)
# INTEGRATE OVER theta
sigma_of_RgivenRoff = simps(inner_integrand, x=theta_1D, axis=0,
even='first')
# theta is gone, now dimensions are: (numRoff,numRbins,nlens)
sig_off_3D = self._sigmaoffset.value.reshape(1, 1, self._nlens)
roff_v2 = roff_1D.reshape(numRoff, 1, 1)
PofRoff = (roff_v2 / (sig_off_3D**2) *
np.exp(-0.5 * (roff_v2 / sig_off_3D)**2))
dbl_integrand = sigma_of_RgivenRoff * PofRoff
# INTEGRATE OVER Roff
# (integration axis=0 after theta is gone).
sigma_smoothed = simps(dbl_integrand, x=roff_1D, axis=0,
even='first')
# reset _x to correspond to input rbins (default)
_set_dimensionless_radius(self)
sigma_sm = np.array(sigma_smoothed.T) * units.solMass / units.pc**2
return sigma_sm
if self._sigmaoffset is None:
finalsigma = _centered_sigma(self)
elif np.abs(self._sigmaoffset).sum() == 0:
finalsigma = _centered_sigma(self)
else:
finalsigma = _offset_sigma(self)
self._sigma_sm = finalsigma
return finalsigma
def deltasigma_nfw(self):
"""Calculate NFW differential surface mass density profile.
Generate the differential surface mass density profiles of each cluster
halo, assuming a spherical NFW model. Optionally includes the effect of
cluster miscentering offsets, if the parent object was initialized
with offsets.
Returns
----------
Quantity
Differential surface mass density profiles (ndarray, in
astropy.units of Msun/pc/pc). Each row corresponds to a single
cluster halo.
"""
def _centered_dsigma(self):
# calculate g
firstpart = np.zeros_like(self._x)
secondpart = np.zeros_like(self._x)
g = np.zeros_like(self._x)
small_1a = 4. / self._x[self._x_small]**2
small_1b = 2. / (self._x[self._x_small]**2 - 1.)
small_1c = np.sqrt(1. - self._x[self._x_small]**2)
firstpart[self._x_small] = (small_1a + small_1b) / small_1c
big_1a = 8. / (self._x[self._x_big]**2 *
np.sqrt(self._x[self._x_big]**2 - 1.))
big_1b = 4. / ((self._x[self._x_big]**2 - 1.)**1.5)
firstpart[self._x_big] = big_1a + big_1b
small_2a = np.sqrt((1. - self._x[self._x_small]) /
(1. + self._x[self._x_small]))
secondpart[self._x_small] = np.log((1. + small_2a) /
(1. - small_2a))
big_2a = self._x[self._x_big] - 1.
big_2b = 1. + self._x[self._x_big]
secondpart[self._x_big] = np.arctan(np.sqrt(big_2a / big_2b))
both_3a = (4. / (self._x**2)) * np.log(self._x / 2.)
both_3b = 2. / (self._x**2 - 1.)
g = firstpart * secondpart + both_3a - both_3b
g[self._x_one] = (10. / 3.) + 4. * np.log(0.5)
if np.isnan(np.sum(g)) or np.isinf(np.sum(g)):
print('\nERROR: g is not all real\n', g)
# calculate & return centered profile
deltasigma = self._rs_dc_rcrit * g
return deltasigma
def _offset_dsigma(self):
original_rbins = self._rbins.value
# if offset sigma was already calculated, use it!
try:
sigma_sm_rbins = self._sigma_sm
except AttributeError:
sigma_sm_rbins = self.sigma_nfw()
innermost_sampling = 1.e-10 # stable for anything below 1e-5
inner_prec = self._numRinner
r_inner = np.linspace(innermost_sampling,
original_rbins.min(),
endpoint=False, num=inner_prec)
outer_prec = self._factorRouter * self._nbins
r_outer = np.linspace(original_rbins.min(),
original_rbins.max(),
endpoint=False, num=outer_prec + 1)[1:]
r_ext_unordered = np.hstack([r_inner, r_outer, original_rbins])
r_extended = np.sort(r_ext_unordered)
# set temporary extended rbins, nbins, x, rs_dc_rcrit array
self._rbins = r_extended * units.Mpc
self._nbins = self._rbins.shape[0]
_set_dimensionless_radius(self) # uses _rbins, _nlens
rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit
self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,
1).repeat(self._nbins, 1)
sigma_sm_extended = self.sigma_nfw()
mean_inside_sigma_sm = np.zeros([self._nlens,
original_rbins.shape[0]])
for i, r in enumerate(original_rbins):
index_of_rbin = np.where(r_extended == r)[0][0]
x = r_extended[0:index_of_rbin + 1]
y = sigma_sm_extended[:, 0:index_of_rbin + 1] * x
integral = simps(y, x=x, axis=-1, even='first')
# average of sigma_sm at r < rbin
mean_inside_sigma_sm[:, i] = (2. / r**2) * integral
mean_inside_sigma_sm = mean_inside_sigma_sm * (units.Msun /
units.pc**2)
# reset original rbins, nbins, x
self._rbins = original_rbins * units.Mpc
self._nbins = self._rbins.shape[0]
_set_dimensionless_radius(self)
rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit
self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,
1).repeat(self._nbins, 1)
self._sigma_sm = sigma_sm_rbins # reset to original sigma_sm
dsigma_sm = mean_inside_sigma_sm - sigma_sm_rbins
return dsigma_sm
if self._sigmaoffset is None:
finaldeltasigma = _centered_dsigma(self)
elif np.abs(self._sigmaoffset).sum() == 0:
finaldeltasigma = _centered_dsigma(self)
else:
finaldeltasigma = _offset_dsigma(self)
return finaldeltasigma
# Notes on Integration
# ------------------------
# Among the choices for numerical integration algorithms, the
# following options were considered:
# (1) scipy.integrate.dblquad is the obvious choice, but is far too
# slow because it makes of order 10^5 calls to the function to be
# integrated, even for generous settings of epsabs, epsrel. Likely
# it is getting stuck in non-smooth portions of the function space.
# (2) scipy.integrate.simps is fast and converges faster than the
# midpoint integration for both the integration over Roff and theta.
# (3) scipy.integrate.romb was somewhat slower than simps and as well
# as the midpoint rule integration.
# (4) midpoint rule integration via a Riemann Sum (the choice used in
# the previous reincarnation of this project in the C programming
# language) is about the same speed as simps, and converges smoothly
# for both integrals, but requires a much larger number of bins to
# converge to the best estimate.
# (5) numpy.trapz underestimates concave down functions.
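# A minimal usage sketch (hypothetical halo parameters; units follow the
# SurfaceMassDensity docstring):
#   import numpy as np
#   rs = np.repeat(0.2, 5)           # scale radii [Mpc]
#   delta_c = np.repeat(8000., 5)    # characteristic overdensities
#   rho_crit = np.repeat(0.2, 5)     # [Msun / Mpc / pc^2]
#   smd = SurfaceMassDensity(rs, delta_c, rho_crit)
#   sigma = smd.sigma_nfw()          # Sigma(R): one row per halo
#   dsigma = smd.deltasigma_nfw()    # DeltaSigma(R)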
| jesford/cluster-lensing | clusterlensing/nfw.py | Python | mit | 17,308 | ["Galaxy", "Gaussian"] | 531a75fb5cd1b5c868a74f54f4a98d69de9796ca4640b70802b664dd56e9bba5 |
# Copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to work with the sprotXX.dat file from SwissProt.
http://www.expasy.ch/sprot/sprot-top.html
Tested with:
Release 56.9, 03-March-2009.
Classes:
- Record Holds SwissProt data.
- Reference Holds reference data from a SwissProt record.
Functions:
- read Read one SwissProt record
- parse Read multiple SwissProt records
"""
from __future__ import print_function
from Bio._py3k import _as_string
__docformat__ = "restructuredtext en"
class Record(object):
"""Holds information from a SwissProt record.
Members:
- entry_name Name of this entry, e.g. RL1_ECOLI.
- data_class Either 'STANDARD' or 'PRELIMINARY'.
- molecule_type Type of molecule, 'PRT',
- sequence_length Number of residues.
- accessions List of the accession numbers, e.g. ['P00321']
- created A tuple of (date, release).
- sequence_update A tuple of (date, release).
- annotation_update A tuple of (date, release).
- description Free-format description.
- gene_name Gene name. See userman.txt for description.
- organism The source of the sequence.
- organelle The origin of the sequence.
- organism_classification The taxonomy classification. List of strings.
(http://www.ncbi.nlm.nih.gov/Taxonomy/)
- taxonomy_id A list of NCBI taxonomy id's.
- host_organism A list of names of the hosts of a virus, if any.
- host_taxonomy_id A list of NCBI taxonomy id's of the hosts, if any.
- references List of Reference objects.
- comments List of strings.
- cross_references List of tuples (db, id1[, id2][, id3]). See the docs.
- keywords List of the keywords.
- features List of tuples (key name, from, to, description).
from and to can be either integers for the residue
numbers, '<', '>', or '?'
- seqinfo tuple of (length, molecular weight, CRC32 value)
- sequence The sequence.
"""
def __init__(self):
self.entry_name = None
self.data_class = None
self.molecule_type = None
self.sequence_length = None
self.accessions = []
self.created = None
self.sequence_update = None
self.annotation_update = None
self.description = []
self.gene_name = ''
self.organism = []
self.organelle = ''
self.organism_classification = []
self.taxonomy_id = []
self.host_organism = []
self.host_taxonomy_id = []
self.references = []
self.comments = []
self.cross_references = []
self.keywords = []
self.features = []
self.seqinfo = None
self.sequence = ''
class Reference(object):
"""Holds information from one reference in a SwissProt entry.
Members:
number Number of reference in an entry.
evidence Evidence code. List of strings.
positions Describes extent of work. List of strings.
comments Comments. List of (token, text).
references References. List of (dbname, identifier).
authors The authors of the work.
title Title of the work.
location A citation for the work.
"""
def __init__(self):
self.number = None
self.positions = []
self.comments = []
self.references = []
self.authors = []
self.title = []
self.location = []
def parse(handle):
while True:
record = _read(handle)
if not record:
return
yield record
def read(handle):
record = _read(handle)
if not record:
raise ValueError("No SwissProt record found")
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one SwissProt record found")
return record
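# Usage sketch (hypothetical filename):
#   with open("uniprot_sprot.dat") as handle:
#       for record in parse(handle):
#           print(record.entry_name, len(record.sequence))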
# Everything below is considered private
def _read(handle):
record = None
unread = ""
for line in handle:
# This is for Python 3 to cope with a binary handle (byte strings),
# or a text handle (unicode strings):
line = _as_string(line)
key, value = line[:2], line[5:].rstrip()
if unread:
value = unread + " " + value
unread = ""
if key == '**':
# See Bug 2353, some files from the EBI have extra lines
# starting "**" (two asterisks/stars). They appear
# to be unofficial automated annotations. e.g.
# **
# ** ################# INTERNAL SECTION ##################
# **HA SAM; Annotated by PicoHamap 1.88; MF_01138.1; 09-NOV-2003.
pass
elif key == 'ID':
record = Record()
_read_id(record, line)
_sequence_lines = []
elif key == 'AC':
            accessions = value.rstrip(";").split("; ")
record.accessions.extend(accessions)
elif key == 'DT':
_read_dt(record, line)
elif key == 'DE':
record.description.append(value.strip())
elif key == 'GN':
if record.gene_name:
record.gene_name += " "
record.gene_name += value
elif key == 'OS':
record.organism.append(value)
elif key == 'OG':
record.organelle += line[5:]
elif key == 'OC':
            cols = value.rstrip(";.").split("; ")
record.organism_classification.extend(cols)
elif key == 'OX':
_read_ox(record, line)
elif key == 'OH':
_read_oh(record, line)
elif key == 'RN':
reference = Reference()
_read_rn(reference, value)
record.references.append(reference)
elif key == 'RP':
assert record.references, "RP: missing RN"
record.references[-1].positions.append(value)
elif key == 'RC':
assert record.references, "RC: missing RN"
reference = record.references[-1]
unread = _read_rc(reference, value)
elif key == 'RX':
assert record.references, "RX: missing RN"
reference = record.references[-1]
_read_rx(reference, value)
elif key == 'RL':
assert record.references, "RL: missing RN"
reference = record.references[-1]
reference.location.append(value)
# In UniProt release 1.12 of 6/21/04, there is a new RG
# (Reference Group) line, which references a group instead of
# an author. Each block must have at least 1 RA or RG line.
elif key == 'RA':
assert record.references, "RA: missing RN"
reference = record.references[-1]
reference.authors.append(value)
elif key == 'RG':
assert record.references, "RG: missing RN"
reference = record.references[-1]
reference.authors.append(value)
elif key == "RT":
assert record.references, "RT: missing RN"
reference = record.references[-1]
reference.title.append(value)
elif key == 'CC':
_read_cc(record, line)
elif key == 'DR':
_read_dr(record, value)
elif key == 'PE':
# TODO - Record this information?
pass
elif key == 'KW':
_read_kw(record, value)
elif key == 'FT':
_read_ft(record, line)
elif key == 'SQ':
cols = value.split()
assert len(cols) == 7, "I don't understand SQ line %s" % line
# Do more checking here?
record.seqinfo = int(cols[1]), int(cols[3]), cols[5]
elif key == ' ':
_sequence_lines.append(value.replace(" ", "").rstrip())
elif key == '//':
# Join multiline data into one string
record.description = " ".join(record.description)
record.organism = " ".join(record.organism)
record.organelle = record.organelle.rstrip()
for reference in record.references:
reference.authors = " ".join(reference.authors).rstrip(";")
reference.title = " ".join(reference.title).rstrip(";")
if reference.title.startswith('"') and reference.title.endswith('"'):
reference.title = reference.title[1:-1] # remove quotes
reference.location = " ".join(reference.location)
record.sequence = "".join(_sequence_lines)
return record
else:
raise ValueError("Unknown keyword '%s' found" % key)
if record:
raise ValueError("Unexpected end of stream.")
def _read_id(record, line):
cols = line[5:].split()
# Prior to release 51, included with MoleculeType:
# ID EntryName DataClass; MoleculeType; SequenceLength AA.
#
# Newer files lack the MoleculeType:
# ID EntryName DataClass; SequenceLength AA.
if len(cols) == 5:
record.entry_name = cols[0]
record.data_class = cols[1].rstrip(";")
record.molecule_type = cols[2].rstrip(";")
record.sequence_length = int(cols[3])
elif len(cols) == 4:
record.entry_name = cols[0]
record.data_class = cols[1].rstrip(";")
record.molecule_type = None
record.sequence_length = int(cols[2])
else:
raise ValueError("ID line has unrecognised format:\n" + line)
# check if the data class is one of the allowed values
allowed = ('STANDARD', 'PRELIMINARY', 'IPI', 'Reviewed', 'Unreviewed')
if record.data_class not in allowed:
raise ValueError("Unrecognized data class %s in line\n%s" %
(record.data_class, line))
# molecule_type should be 'PRT' for PRoTein
# Note that has been removed in recent releases (set to None)
if record.molecule_type not in (None, 'PRT'):
raise ValueError("Unrecognized molecule type %s in line\n%s" %
(record.molecule_type, line))
def _read_dt(record, line):
value = line[5:]
uprline = value.upper()
cols = value.rstrip().split()
if 'CREATED' in uprline \
or 'LAST SEQUENCE UPDATE' in uprline \
or 'LAST ANNOTATION UPDATE' in uprline:
# Old style DT line
# =================
# e.g.
# DT 01-FEB-1995 (Rel. 31, Created)
# DT 01-FEB-1995 (Rel. 31, Last sequence update)
# DT 01-OCT-2000 (Rel. 40, Last annotation update)
#
# or:
# DT 08-JAN-2002 (IPI Human rel. 2.3, Created)
# ...
# find where the version information will be located
# This is needed for when you have cases like IPI where
        # the release version is in a different spot:
# DT 08-JAN-2002 (IPI Human rel. 2.3, Created)
uprcols = uprline.split()
rel_index = -1
for index in range(len(uprcols)):
if 'REL.' in uprcols[index]:
rel_index = index
assert rel_index >= 0, \
"Could not find Rel. in DT line: %s" % line
version_index = rel_index + 1
# get the version information
str_version = cols[version_index].rstrip(",")
# no version number
if str_version == '':
version = 0
# dot versioned
elif '.' in str_version:
version = str_version
# integer versioned
else:
version = int(str_version)
date = cols[0]
if 'CREATED' in uprline:
record.created = date, version
elif 'LAST SEQUENCE UPDATE' in uprline:
record.sequence_update = date, version
elif 'LAST ANNOTATION UPDATE' in uprline:
record.annotation_update = date, version
else:
assert False, "Shouldn't reach this line!"
elif 'INTEGRATED INTO' in uprline \
or 'SEQUENCE VERSION' in uprline \
or 'ENTRY VERSION' in uprline:
# New style DT line
# =================
# As of UniProt Knowledgebase release 7.0 (including
# Swiss-Prot release 49.0 and TrEMBL release 32.0) the
# format of the DT lines and the version information
# in them was changed - the release number was dropped.
#
# For more information see bug 1948 and
# http://ca.expasy.org/sprot/relnotes/sp_news.html#rel7.0
#
# e.g.
# DT 01-JAN-1998, integrated into UniProtKB/Swiss-Prot.
# DT 15-OCT-2001, sequence version 3.
# DT 01-APR-2004, entry version 14.
#
# This is a new style DT line...
# The date should be in string cols[1]
# Get the version number if there is one.
# For the three DT lines above: 0, 3, 14
try:
version = int(cols[-1])
except ValueError:
version = 0
date = cols[0].rstrip(",")
# Re-use the historical property names, even though
        # the meaning has changed slightly:
if "INTEGRATED" in uprline:
record.created = date, version
elif 'SEQUENCE VERSION' in uprline:
record.sequence_update = date, version
elif 'ENTRY VERSION' in uprline:
record.annotation_update = date, version
else:
assert False, "Shouldn't reach this line!"
else:
raise ValueError("I don't understand the date line %s" % line)
def _read_ox(record, line):
# The OX line used to be in the simple format:
# OX DESCRIPTION=ID[, ID]...;
# If there are too many id's to fit onto a line, then the ID's
# continue directly onto the next line, e.g.
# OX DESCRIPTION=ID[, ID]...
# OX ID[, ID]...;
# Currently, the description is always "NCBI_TaxID".
# To parse this, I need to check to see whether I'm at the
# first line. If I am, grab the description and make sure
# it's an NCBI ID. Then, grab all the id's.
#
# As of the 2014-10-01 release, there may be an evidence code, e.g.
# OX NCBI_TaxID=418404 {ECO:0000313|EMBL:AEX14553.1};
# In the short term, we will ignore any evidence codes:
line = line.split('{')[0]
if record.taxonomy_id:
ids = line[5:].rstrip().rstrip(";")
else:
descr, ids = line[5:].rstrip().rstrip(";").split("=")
assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
record.taxonomy_id.extend(ids.split(', '))
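# Example (sketch): an "OX   NCBI_TaxID=9606;" line leaves
# record.taxonomy_id == ['9606'].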
def _read_oh(record, line):
# Line type OH (Organism Host) for viral hosts
assert line[5:].startswith("NCBI_TaxID="), "Unexpected %s" % line
line = line[16:].rstrip()
assert line[-1] == "." and line.count(";") == 1, line
taxid, name = line[:-1].split(";")
record.host_taxonomy_id.append(taxid.strip())
record.host_organism.append(name.strip())
def _read_rn(reference, rn):
# This used to be a very simple line with a reference number, e.g.
# RN [1]
# As of the 2014-10-01 release, there may be an evidence code, e.g.
# RN [1] {ECO:0000313|EMBL:AEX14553.1}
words = rn.split(None, 1)
number = words[0]
assert number.startswith('[') and number.endswith(']'), "Missing brackets %s" % number
reference.number = int(number[1:-1])
if len(words) > 1:
evidence = words[1]
assert evidence.startswith('{') and evidence.endswith('}'), "Missing braces %s" % evidence
reference.evidence = evidence[1:-1].split('|')
def _read_rc(reference, value):
cols = value.split(';')
if value[-1] == ';':
unread = ""
else:
cols, unread = cols[:-1], cols[-1]
for col in cols:
if not col: # last column will be the empty string
return
# The token is everything before the first '=' character.
i = col.find("=")
if i >= 0:
token, text = col[:i], col[i + 1:]
comment = token.lstrip(), text
reference.comments.append(comment)
else:
comment = reference.comments[-1]
comment = "%s %s" % (comment, col)
reference.comments[-1] = comment
return unread
def _read_rx(reference, value):
# The basic (older?) RX line is of the form:
# RX MEDLINE; 85132727.
# but there are variants of this that need to be dealt with (see below)
# CLD1_HUMAN in Release 39 and DADR_DIDMA in Release 33
# have extraneous information in the RX line. Check for
# this and chop it out of the line.
# (noticed by katel@worldpath.net)
value = value.replace(' [NCBI, ExPASy, Israel, Japan]', '')
# RX lines can also be used of the form
# RX PubMed=9603189;
# reported by edvard@farmasi.uit.no
# and these can be more complicated like:
# RX MEDLINE=95385798; PubMed=7656980;
# RX PubMed=15060122; DOI=10.1136/jmg 2003.012781;
# We look for these cases first and deal with them
warn = False
if "=" in value:
cols = value.split("; ")
cols = [x.strip() for x in cols]
cols = [x for x in cols if x]
for col in cols:
x = col.split("=")
if len(x) != 2 or x == ("DOI", "DOI"):
warn = True
break
assert len(x) == 2, "I don't understand RX line %s" % value
reference.references.append((x[0], x[1].rstrip(";")))
# otherwise we assume we have the type 'RX MEDLINE; 85132727.'
else:
cols = value.split("; ")
        # normally we split into the two parts
if len(cols) != 2:
warn = True
else:
reference.references.append((cols[0].rstrip(";"), cols[1].rstrip(".")))
if warn:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Possibly corrupt RX line %r" % value,
BiopythonParserWarning)
def _read_cc(record, line):
key, value = line[5:8], line[9:].rstrip()
if key == '-!-': # Make a new comment
record.comments.append(value)
elif key == ' ': # add to the previous comment
if not record.comments:
# TCMO_STRGA in Release 37 has comment with no topic
record.comments.append(value)
else:
record.comments[-1] += " " + value
def _read_dr(record, value):
cols = value.rstrip(".").split('; ')
record.cross_references.append(tuple(cols))
def _read_kw(record, value):
# Old style - semi-colon separated, multi-line. e.g. Q13639.txt
# KW Alternative splicing; Cell membrane; Complete proteome;
# KW Disulfide bond; Endosome; G-protein coupled receptor; Glycoprotein;
# KW Lipoprotein; Membrane; Palmitate; Polymorphism; Receptor; Transducer;
# KW Transmembrane.
#
# New style as of 2014-10-01 release with evidence codes, e.g. H2CNN8.txt
# KW Monooxygenase {ECO:0000313|EMBL:AEX14553.1};
# KW Oxidoreductase {ECO:0000313|EMBL:AEX14553.1}.
# For now to match the XML parser, drop the evidence codes.
for value in value.rstrip(";.").split('; '):
if value.endswith("}"):
# Discard the evidence code
value = value.rsplit("{", 1)[0]
record.keywords.append(value.strip())
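# Example (sketch): a value of 'Monooxygenase {ECO:0000313|EMBL:AEX14553.1};'
# appends just 'Monooxygenase' to record.keywords.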
def _read_ft(record, line):
line = line[5:] # get rid of junk in front
name = line[0:8].rstrip()
try:
from_res = int(line[9:15])
except ValueError:
from_res = line[9:15].lstrip()
try:
to_res = int(line[16:22])
except ValueError:
to_res = line[16:22].lstrip()
# if there is a feature_id (FTId), store it away
if line[29:35] == r"/FTId=":
ft_id = line[35:70].rstrip()[:-1]
description = ""
else:
ft_id = ""
description = line[29:70].rstrip()
if not name: # is continuation of last one
assert not from_res and not to_res
name, from_res, to_res, old_description, old_ft_id = record.features[-1]
del record.features[-1]
description = ("%s %s" % (old_description, description)).strip()
# special case -- VARSPLIC, reported by edvard@farmasi.uit.no
if name == "VARSPLIC":
# Remove unwanted spaces in sequences.
# During line carryover, the sequences in VARSPLIC can get mangled
# with unwanted spaces like:
# 'DISSTKLQALPSHGLESIQT -> PCRATGWSPFRRSSPC LPTH'
# We want to check for this case and correct it as it happens.
descr_cols = description.split(" -> ")
if len(descr_cols) == 2:
first_seq, second_seq = descr_cols
extra_info = ''
# we might have more information at the end of the
# second sequence, which should be in parenthesis
extra_info_pos = second_seq.find(" (")
if extra_info_pos != -1:
extra_info = second_seq[extra_info_pos:]
second_seq = second_seq[:extra_info_pos]
# now clean spaces out of the first and second string
first_seq = first_seq.replace(" ", "")
second_seq = second_seq.replace(" ", "")
# reassemble the description
description = first_seq + " -> " + second_seq + extra_info
record.features.append((name, from_res, to_res, description, ft_id))
if __name__ == "__main__":
print("Quick self test...")
example_filename = "../../Tests/SwissProt/sp008"
import os
if not os.path.isfile(example_filename):
print("Missing test file %s" % example_filename)
else:
# Try parsing it!
with open(example_filename) as handle:
records = parse(handle)
for record in records:
print(record.entry_name)
print(",".join(record.accessions))
print(record.keywords)
print(repr(record.organism))
print(record.sequence[:20] + "...")
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SwissProt/__init__.py
|
Python
|
apache-2.0
| 22,397
|
[
"Biopython"
] |
6586672616d805cd95fad1941cebc186d43bcb779e192d4b0ad7c0cd23da82b6
|
'''
sbclearn (c) University of Manchester 2017
sbclearn is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from scipy import stats
def coeff_corr(i, j):
    '''Return the squared Pearson correlation coefficient (r**2) of i and j.'''
_, _, r_value, _, _ = stats.linregress(i, j)
return r_value**2
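# Usage sketch (made-up data): perfectly linear inputs give r**2 == 1.0.
if __name__ == '__main__':
    print(coeff_corr([1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]))  # -> 1.0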
|
synbiochem/synbiochem-learn
|
gg_learn/utils/__init__.py
|
Python
|
mit
| 347
|
[
"VisIt"
] |
86dc88a123fb95c66c964bcca18c82c78ac6d6a3d1e3f5a1121187165ff25cf5
|
from guiData import *
from PyQt4 import QtCore, QtGui
import functools
class BCTab(QtGui.QWidget):
"""This class represents the bc tab"""
# pass in the gui data object and a function to change visibility of vtk parts
def __init__(self, data, visibilityChangeFunc):
self.data = data
self.visibilityChangeFunc = visibilityChangeFunc
super(BCTab, self).__init__()
addBCButton = QtGui.QPushButton('Add Boundary Condition', self)
addBCButton.clicked.connect(functools.partial(self.BoundaryConditionPopup, self.data))
addBCButton.setToolTip('Add a new boundary condition')
addBCButton.resize(addBCButton.sizeHint())
addBCButton.move(10, 10)
def clearBCVizBoxes(self):
checkBoxes = self.findChildren(QtGui.QCheckBox)
for i in reversed(range(0, len(checkBoxes))):
checkBoxes[i].setParent(None)
def drawBCVizBoxes(self):
# Clear the old boxes if they exist
self.clearBCVizBoxes()
# This loop assumes that the vtk parts are created in the same order as the factags
irow = 0
for tag in self.data.elemGroups:
if int(tag) >= 0:
checkViz = QtGui.QCheckBox('factag: ' + str(tag), self)
checkViz.setChecked(True)
else:
checkViz = QtGui.QCheckBox('voltag: ' + str(tag), self)
checkViz.setChecked(False) # hide volumes by default
checkViz.stateChanged.connect(functools.partial(self.visibilityChangeFunc, int(irow), int(tag)))
checkViz.move(20, 40+25*irow)
irow = irow + 1
self.bcwindow = None
def BoundaryConditionPopup(self, data):
print "Opening a new BC popup window..."
self.bcwindow = BCPopup(data)
self.bcwindow.setGeometry(QtCore.QRect(100, 100, 600, 300))
self.bcwindow.show()
class BCPopup(QtGui.QWidget):
"""This class represents the bc popup for BC editing"""
def __init__(self, data):
QtGui.QWidget.__init__(self)
self.numBCsSet = len(data.elemGroups)
self.bcIdSelected = 0
self.bcTypeSelected = 'NULL'
# setup layout
self.vl = QtGui.QGridLayout()
self.vl.setSpacing(10)
self.setLayout(self.vl)
# List of factags (i.e. BCs to set)
self.bcList = QtGui.QComboBox()
for tag in data.elemGroups:
self.bcList.addItem('factag: ' + str(tag))
self.bcList.activated[str].connect(self.bcListActivate)
self.vl.addWidget(self.bcList,0,0)
# List of bc types available
self.typeList = QtGui.QComboBox()
for type in data.bcTypes:
self.typeList.addItem(type)
self.typeList.activated[str].connect(self.bcTypeActivate)
self.vl.addWidget(self.typeList,1,0)
# List of current BCs set
bcCols = 4
bcRows = self.numBCsSet
self.bcTable = QtGui.QTableWidget()
self.bcTable.setRowCount(self.numBCsSet)
self.bcTable.setColumnCount(bcCols)
self.bcTable.resize(self.bcTable.sizeHint())
irow = 0
for tag in data.elemGroups:
self.bcTable.setItem(irow,1, QtGui.QTableWidgetItem(str(tag)))
irow = irow + 1
self.bcTable.setHorizontalHeaderLabels(("BoundaryID; Factag; head3; Nickname").split(";"))
self.bcTable.show()
self.vl.addWidget(self.bcTable,2,1)
self.addBCButton = QtGui.QPushButton('Apply')
self.addBCButton.clicked.connect(self.createBCObj)
self.addBCButton.setToolTip('Apply Boundary Condition')
self.addBCButton.resize(self.addBCButton.sizeHint())
self.addBCButton.setStyleSheet("background-color: #5BC85B")
self.vl.addWidget(self.addBCButton,3,1)
def bcListActivate(self, text):
print 'Combo selection changed to ' + text
self.bcIdSelected = text
def bcTypeActivate(self, text):
print 'Combo selection changed to ' + text
self.bcTypeSelected = text
def treeChanged(self, item, column):
print 'tree changed'
def createBCObj(self):
print 'Apply clicked'
|
ngcurrier/ProteusCFD
|
GUI/BCTab.py
|
Python
|
gpl-3.0
| 4,203
|
[
"VTK"
] |
122eef9c496ea4aaa9c7a00dd4498675e699c2523a161b9fc6edb9948679026e
|
#
# This is an example script for accessing a Magpie server
# from Python. To use it you first need to generate the
# Python interface code with Apache Thrift:
#
# thrift --gen py magpie.thrift
#
# Then, start a Magpie server with models for predicting
# the volume and bandgap energy of a material when provided
# with its composition. There should eventually be a
# publicly-accessible server running this, so you
# eventually won't need this step.
#
# Author: Logan Ward
# Date: 18 Feb 2015
import sys
sys.path.append('gen-py')
from magpie import *
from magpie.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
# Make socket
transport = TSocket.TSocket('localhost', 4581)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = MagpieServer.Client(protocol)
# Connect!
transport.open()
# Evaluate properties of NaCl
entry = Entry()
entry.name = "NaCl"
res = client.evaluateProperties([entry], ["bandgap"])
entry = res[0]
print "Predicted bandgap of %s: %s eV"%(entry.name, entry.predictedProperties['bandgap'])
# Search for materials with a band gap closest to 5.4 eV
res = client.searchSingleObjective("bandgap minimize TargetEntryRanker 5.4",
"PhaseDiagramCompositionEntryGenerator 1 2 -crystal 5 Ni Fe O Si F S Cu Au Zn Ge Na Cl",
10)
print "Materials with a band gap close to 5.4 eV:"
for e in res:
print "\t%s"%e.name
# Search for materials with a band gap close to 5.4 eV and minimum specific volume
res = client.searchMultiObjective(10.0,
["bandgap minimize TargetEntryRanker 5.4", "volume minimize SimpleEntryRanker"],
"PhaseDiagramCompositionEntryGenerator 1 2 -crystal 5 Ni Fe O Si F S Cu Au Zn Ge Na Cl",
10)
print "Materials with a band gap close to 5.4 eV and minimum specific volume:"
for e in res:
print "\t%s"%e.name
# Close!
transport.close()
except MagpieException, mx:
print mx
print '%s' % (mx.why)
except Thrift.TException, tx:
print '%s' % (tx.message)
|
amarkrishna/demo1
|
thrift/magpie-client.py
|
Python
|
mit
| 2,145
|
[
"CRYSTAL"
] |
19e514238d771b2cfe28f4b1a0ed40d027cb519b60f44502059e2fdd633a41d3
|
#################################################################
# Class DirectoryListing
# Author: A.T.
# Added 02.03.2015
#################################################################
import stat
from DIRAC import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
class DirectoryListing(object):
def __init__(self):
self.entries = []
def addFile(self, name, fileDict, repDict, numericid):
"""Pretty print of the file ls output"""
perm = fileDict["Mode"]
date = fileDict["ModificationDate"]
# nlinks = fileDict.get('NumberOfLinks',0)
nreplicas = len(repDict)
size = fileDict["Size"]
if "Owner" in fileDict:
uname = fileDict["Owner"]
elif "OwnerDN" in fileDict:
result = Registry.getUsernameForDN(fileDict["OwnerDN"])
if result["OK"]:
uname = result["Value"]
else:
uname = "unknown"
else:
uname = "unknown"
if numericid:
uname = str(fileDict["UID"])
if "OwnerGroup" in fileDict:
gname = fileDict["OwnerGroup"]
elif "OwnerRole" in fileDict:
groups = Registry.getGroupsWithVOMSAttribute("/" + fileDict["OwnerRole"])
if groups:
if len(groups) > 1:
gname = groups[0]
default_group = gConfig.getValue("/Registry/DefaultGroup", "unknown")
if default_group in groups:
gname = default_group
else:
gname = groups[0]
else:
gname = "unknown"
else:
gname = "unknown"
if numericid:
gname = str(fileDict["GID"])
self.entries.append(("-" + self.__getModeString(perm), nreplicas, uname, gname, size, date, name))
def addDirectory(self, name, dirDict, numericid):
"""Pretty print of the file ls output"""
perm = dirDict["Mode"]
date = dirDict["ModificationDate"]
nlinks = 0
size = 0
if "Owner" in dirDict:
uname = dirDict["Owner"]
elif "OwnerDN" in dirDict:
result = Registry.getUsernameForDN(dirDict["OwnerDN"])
if result["OK"]:
uname = result["Value"]
else:
uname = "unknown"
else:
uname = "unknown"
if numericid:
uname = str(dirDict["UID"])
if "OwnerGroup" in dirDict:
gname = dirDict["OwnerGroup"]
elif "OwnerRole" in dirDict:
groups = Registry.getGroupsWithVOMSAttribute("/" + dirDict["OwnerRole"])
if groups:
if len(groups) > 1:
gname = groups[0]
default_group = gConfig.getValue("/Registry/DefaultGroup", "unknown")
if default_group in groups:
gname = default_group
else:
gname = groups[0]
else:
gname = "unknown"
if numericid:
gname = str(dirDict["GID"])
self.entries.append(("d" + self.__getModeString(perm), nlinks, uname, gname, size, date, name))
def addDataset(self, name, datasetDict, numericid):
"""Pretty print of the dataset ls output"""
perm = datasetDict["Mode"]
date = datasetDict["ModificationDate"]
size = datasetDict["TotalSize"]
if "Owner" in datasetDict:
uname = datasetDict["Owner"]
elif "OwnerDN" in datasetDict:
result = Registry.getUsernameForDN(datasetDict["OwnerDN"])
if result["OK"]:
uname = result["Value"]
else:
uname = "unknown"
else:
uname = "unknown"
if numericid:
uname = str(datasetDict["UID"])
gname = "unknown"
if "OwnerGroup" in datasetDict:
gname = datasetDict["OwnerGroup"]
if numericid:
gname = str(datasetDict["GID"])
numberOfFiles = datasetDict["NumberOfFiles"]
self.entries.append(("s" + self.__getModeString(perm), numberOfFiles, uname, gname, size, date, name))
    def __getModeString(self, perm):
        """Get string representation of the file/directory mode"""
        pstring = ""
        for bit, char in (
            (stat.S_IRUSR, "r"),
            (stat.S_IWUSR, "w"),
            (stat.S_IXUSR, "x"),
            (stat.S_IRGRP, "r"),
            (stat.S_IWGRP, "w"),
            (stat.S_IXGRP, "x"),
            (stat.S_IROTH, "r"),
            (stat.S_IWOTH, "w"),
            (stat.S_IXOTH, "x"),
        ):
            # append the permission character when the bit is set, '-' otherwise
            pstring += char if perm & bit else "-"
        return pstring
def humanReadableSize(self, num, suffix="B"):
"""Translate file size in bytes to human readable
Powers of 2 are used (1Mi = 2^20 = 1048576 bytes).
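        For example, humanReadableSize(1048576) returns '1.0MiB'.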
"""
num = int(num)
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def printListing(self, reverse, timeorder, sizeorder, humanread):
""" """
if timeorder:
if reverse:
self.entries.sort(key=lambda x: x[5])
else:
self.entries.sort(key=lambda x: x[5], reverse=True)
elif sizeorder:
if reverse:
self.entries.sort(key=lambda x: x[4])
else:
self.entries.sort(key=lambda x: x[4], reverse=True)
else:
if reverse:
self.entries.sort(key=lambda x: x[6], reverse=True)
else:
self.entries.sort(key=lambda x: x[6])
# Determine the field widths
wList = [0] * 7
for d in self.entries:
for i in range(7):
if humanread and i == 4:
humanreadlen = len(str(self.humanReadableSize(d[4])))
if humanreadlen > wList[4]:
wList[4] = humanreadlen
else:
if len(str(d[i])) > wList[i]:
wList[i] = len(str(d[i]))
for e in self.entries:
size = e[4]
if humanread:
size = self.humanReadableSize(e[4])
print(str(e[0]), end=" ")
print(str(e[1]).rjust(wList[1]), end=" ")
print(str(e[2]).ljust(wList[2]), end=" ")
print(str(e[3]).ljust(wList[3]), end=" ")
print(str(size).rjust(wList[4]), end=" ")
print(str(e[5]).rjust(wList[5]), end=" ")
print(str(e[6]))
def addSimpleFile(self, name):
"""Add single files to be sorted later"""
self.entries.append(name)
def printOrdered(self):
"""print the ordered list"""
self.entries.sort()
for entry in self.entries:
print(entry)
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/Client/DirectoryListing.py
|
Python
|
gpl-3.0
| 7,586
|
[
"DIRAC"
] |
94bb3f8d6c95317e6adc163f9a7e1cd7c43458271aa150b0417eb94356664e51
|
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
import os
import GEOM
import geompy
import smesh
import hexablock
import math
import SALOMEDS
k1 = 1
OPT_QUAD_IK = 1
OPT_FIRST = 2
count = 1
def save_schema(doc):
"""
    save the block model as a VTK file
"""
global count
file_name = os.path.join(os.environ['TMP'], 'bride' + str(count) + '.vtk')
doc.saveVtk(file_name)
count += 1
pass
def merge_quads(doc, quart, demi, ni1, nj1, ni2, nj2, option=0):
    """
    merge the quadrangles between the two cylindrical grids:
"""
prems = False
if option == OPT_FIRST:
prems = True
quad_ik = False
if option == OPT_QUAD_IK:
quad_ik = True
    orig = None
    if quad_ik:
        orig = quart.getQuadIK(ni1, nj1, k1)
    else:
        orig = quart.getQuadJK(ni1, nj1, k1)
    dest = demi.getQuadJK(ni2, nj2, k1)
    # JPL, 10/05/2011:
    # closeQuads() is not accessible from Python
    # apparently it works with mergeQuads()
## if prems:
if True:
iq1 = 0
if quad_ik:
iq1 = 1
iq3 = 1 - iq1
v1 = dest.getVertex(iq1)
v3 = dest.getVertex(iq3)
v2 = orig.getVertex(0)
v4 = orig.getVertex(1)
doc.mergeQuads(dest, orig, v1, v2, v3, v4)
pass
else:
## doc.closeQuads(dest, orig)
print "closeQuads() : not yet implemented"
pass
return None
BREP_PATH = os.path.expandvars("$HEXABLOCK_ROOT_DIR/bin/salome/bride.brep")
#=============================
# DOCUMENT CREATION
#=============================
doc = hexablock.addDocument()
#=============================
# PARAMETERS
#=============================
height = 1.0
# cylinder grid 1 :
nr1 = 8
na1 = 4
nl1 = 5
dr1 = 1.0
da1 = 45.0 # angle
dl1 = height
# cylinder grid 2 :
nr2 = 3
na2 = 8
nl2 = nl1
dr2 = 0.5
da2 = 180.0 # angle
dl2 = dl1
#=============================
# Creation of the block model
#=============================
# JPL (09/05/2011)
# taken over from test_bride_abu.cxx (most up-to-date version in ~/IHMHEXA/Alain/models):
# and from BRIDE.py (Karima) for the "real" coordinates:
#=================================================
# Creation of the cylindrical grid centres
#=================================================
center1 = doc.addVertex(0, 0, height)
center2 = doc.addVertex(6, 0, height)
dx = doc.addVector(height, 0, 0)
dz = doc.addVector(0, 0, height)
# Creation of the initial cylindrical grids
#============================================
# 1. 1st grid (quarter):
#==========================
# JPL (10/05/2011): given the geometry, the centre is not filled:
## grille_cyl_quart = doc.makeCylindrical(orig1, dx, dz, dr_q, 45.0, dl,
## nr_q, na_q, dim_z, True)
grille_cyl_quart = doc.makeCylindrical(center1, dx, dz, dr1, da1, dl1,
nr1, na1, nl1, False)
# temporary: save the block model:
save_schema(doc)
# end temporary
# Pruning:
for nk in range(2, nl1):
for nj in range(na1):
ideb = 2
if nk == nl1 - 1:
ideb = 1
for ni in range(ideb, nr1):
doc.removeHexa(grille_cyl_quart.getHexaIJK(ni, nj, nk))
pass
pass
pass
# temporary: save the block model:
save_schema(doc)
# end temporary
# Base:
k0 = 0
for nj in range(na1):
for ni in range(2, nr1):
doc.removeHexa(grille_cyl_quart.getHexaIJK(ni, nj, k0))
pass
pass
# temporary: save the block model:
save_schema(doc)
# end temporary
# @todo JPL: can the top edges of the top hexahedra of the second row
# be merged? (cf. GEOM). If so, also revisit the association
# 2. 2nd grid (half):
#========================
grille_cyl_demi = doc.makeCylindrical(center2, dx, dz, dr2, da2, dl2,
nr2, na2, nl2, True)
# temporary: save the block model:
save_schema(doc)
# end temporary
ni0 = [0, nr2, 2, 1, 0] # as a function of z (i.e. nk)
for nk in range(0, nl2):
    for nj in range(na2): # pruning along the whole half-circumference
for ni in range(ni0[nk], nr2):
doc.removeHexa(grille_cyl_demi.getHexaIJK(ni, nj, nk))
pass
pass
pass
# temporary: save the block model:
save_schema(doc)
# end temporary
# 3. digging the foundations of the half grid into the quarter grid:
#==================================================
for nj in range(2):
for ni in range(3, nr1 - 1):
doc.removeHexa(grille_cyl_quart.getHexaIJK(ni, nj, k1))
pass
pass
# temporary: save the block model:
save_schema(doc)
# end temporary
# 4. Merging the edges:
#======================
merge_quads(doc, grille_cyl_quart, grille_cyl_demi, 7, 0, nr2, 0, OPT_FIRST)
merge_quads(doc, grille_cyl_quart, grille_cyl_demi, 7, 1, nr2, 1)
for ni1 in range(2, 6):
merge_quads(doc, grille_cyl_quart, grille_cyl_demi, 8 - ni1, 2, nr2, ni1, OPT_QUAD_IK)
pass
merge_quads(doc, grille_cyl_quart, grille_cyl_demi, 3, 1, nr2, 6)
merge_quads(doc, grille_cyl_quart, grille_cyl_demi, 3, 0, nr2, 7)
# temporary: save the block model:
save_schema(doc)
# end temporary
###########
# Geometry
###########
bride_geom = geompy.ImportFile(BREP_PATH, "BREP")
geompy.addToStudy(bride_geom, "bride_geom")
# geometry parameters:
r1 = 12.0
r1_t = 7.88
r2 = 20.0
r2_t = 2.0
##############
# Association
##############
# vertex/point association for grid 1
# (all vertices that are not merged with those of grid 2)
dz_geom = geompy.MakeVectorDXDYDZ(0., 0., 1.)
# the vertices of cylinder 1 are created bottom-up, turning
# counter-clockwise:
# 6 vertices are needed along the z axis (to associate with the 6
# vertices of the model):
z_val = [-1, 1.5, 21.5, 34, 46.5] # nl1 + 1 values
for ni in range(nr1 + 1):
    # for each ni, the x values for the (nl1 + 1) points (along the z axis)
x = []
z = []
    nb_z = 0 # number of points along the z axis
if ni == 0:
z = z_val
x = [r1_t] * len(z)
pass
elif ni == 1:
z = z_val
x = [r1_t + 2.77] * len(z)
pass
elif ni == 2:
        z = z_val[0:-1] # all but the last
        x_last = r1_t + 2.77
        x = [24.0, 24.0, 19.0, (19.0 - x_last)/2 + x_last, x_last] # for the 4th point, average
        # of the 3rd and the 5th
pass
elif ni == 3:
z = z_val[1:3]
x = [24.0, 19.0]
pass
elif ni == 4:
z = z_val[1:3]
        x = [26.5, 21.0] # to be reviewed for the first point ??
pass
elif ni == 8:
z = z_val[1:3]
x = [47.5] * 2
pass
else: # ni = 5, 6, 7
z = z_val[1:3]
x = [26.5 + (47.5 - 26.5)/4*(ni - 4)] * 2
pass
pass
nb_z = len(z)
    # create the points for y = 0:
vert_grid1_xi = [geompy.MakeVertex(xi, 0, zi) for (xi, zi) in \
zip(x, z)]
    # the following points are created by rotating the previous ones by
    # PI/16 about the z axis:
    angle = math.pi/4.0/na1 # PI/4 (45 degrees), divided by 4
for j in range(na1):
li = [geompy.MakeRotation(v, dz_geom, angle) for v in vert_grid1_xi[-nb_z:]]
vert_grid1_xi.extend(li)
pass
    # add the points to the study and associate them:
    # merged vertices and vertices of deleted hexahedra are not taken
    # into account.
for nj in range(na1 + 1):
for nk in range(nb_z):
if (ni <= 2) or (3 <= ni <= 7 and nj >= na1 - 1) or \
(ni == 8 and (nj == 0 or nj >= na1 - 1)):
v_mod = grille_cyl_quart.getVertexIJK(ni, nj, nk)
v_geo = vert_grid1_xi[nk + nj*nb_z]
geompy.addToStudy(v_geo, "vert_grid1_x" + str(ni) + "_y" + \
str(nj) + "_z" + str(nk))
v_mod.setAssociation(v_geo)
pass
pass
pass
pass
# vertex/point association for grid 2
# (all vertices that are not merged with those of grid 1)
## dz_geom2 = geompy.MakeVectorDXDYDZ(33.5, 0., 1.)
pt_a = geompy.MakeVertex(33.5, 0, 0)
pt_b = geompy.MakeVertex(33.5, 0, 1.)
dz_geom2 = geompy.MakeVector(pt_a, pt_b)
# the vertices of cylinder 2 are created bottom-up, turning
# counter-clockwise:
# NOTE: for now the central part of the cylinder is set aside
# (6 vertices along z) => to be done at the end. They are in any case
# stored after the others in the model
# 6 vertices are needed along the z axis (to associate with the 6
# vertices of the model):
z_val = [-1, 1.5, 21.5, 24, 36, 41.5] # nl2 + 1 values
for ni in range(nr2 + 1):
    # for each ni, the x values for the (nl2 + 1) points (along the z axis)
x = []
z = []
    nb_z = 0 # number of points along the z axis
if ni == 0:
z = z_val
x = [39.5] * len(z)
pass
elif ni == 1:
        z = z_val[1:-1] # all but the first and the last
x = [42.5] * len(z)
pass
elif ni == 2:
        z = z_val[1:-2] # all but the first and the last two
x = [46.] * len(z)
pass
elif ni == 3:
z = z_val[1:3]
        x = [46.7] * len(z) # value to be reviewed ??
pass
pass
nb_z = len(z)
    # create the points for y = 0:
vert_grid2_xi = [geompy.MakeVertex(xi, 0, zi) for (xi, zi) in \
zip(x, z)]
    # the following points are created by rotating the previous ones by
    # PI/8 about the z axis:
    angle = math.pi/na2 # PI (180 degrees), divided by 8
for j in range(na2):
li = [geompy.MakeRotation(v, dz_geom2, angle) for v in vert_grid2_xi[-nb_z:]]
vert_grid2_xi.extend(li)
pass
    # add the points to the study and associate them:
for nj in range(na2 + 1):
for nk in range(nb_z):
v_mod = grille_cyl_demi.getVertexIJK(ni, nj, nk)
v_geo = vert_grid2_xi[nk + nj*nb_z]
geompy.addToStudy(v_geo, "vert_grid2_x" + str(ni) + "_y" + \
str(nj) + "_z" + str(nk))
v_mod.setAssociation(v_geo)
pass
pass
pass
# association of the vertices shared by grid 1 and grid 2
# NOTE: is this step unnecessary? Indeed, since those vertices have been
# merged, the association was already made with grid 2
|
FedoraScientific/salome-hexablock
|
doc/pyplots/bride.py
|
Python
|
lgpl-2.1
| 11,325
|
[
"VTK"
] |
e6fc758693e3ef621ddd6b549fbc33f9cf164dde68419d90a1270fa0960c0980
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
# flake8: noqa
"""
Imageio is plugin-based. Every supported format is provided with a
plugin. You can write your own plugins to make imageio support
additional formats. And we would be interested in adding such code to the
imageio codebase!
What is a plugin
----------------
In imageio, a plugin provides one or more :class:`.Format` objects, and
corresponding :class:`.Reader` and :class:`.Writer` classes.
Each Format object represents an implementation to read/write a
particular file format. Its Reader and Writer classes do the actual
reading/saving.
The reader and writer objects have a ``request`` attribute that can be
used to obtain information about the read or write :class:`.Request`, such as
user-provided keyword arguments, as well get access to the raw image
data.
Registering
-----------
Strictly speaking a format can be used stand alone. However, to allow
imageio to automatically select it for a specific file, the format must
be registered using ``imageio.formats.add_format()``.
Note that a plugin is not required to be part of the imageio package; as
long as a format is registered, imageio can use it. This makes imageio very
easy to extend.
What methods to implement
--------------------------
Imageio is designed such that plugins only need to implement a few
private methods. The public API is implemented by the base classes.
In effect, the public methods can be given a decent docstring which
does not have to be repeated in the plugins.
For the Format class, the following needs to be implemented/specified:
* The format needs a short name, a description, and a list of file
extensions that are common for the file-format in question.
These are set when instantiating the Format object.
* Use a docstring to provide more detailed information about the
format/plugin, such as parameters for reading and saving that the user
can supply via keyword arguments.
* Implement ``_can_read(request)``, return a bool.
See also the :class:`.Request` class.
* Implement ``_can_write(request)``, ditto.
For the Format.Reader class:
* Implement ``_open(**kwargs)`` to initialize the reader. Deal with the
user-provided keyword arguments here.
* Implement ``_close()`` to clean up.
* Implement ``_get_length()`` to provide a suitable length based on what
the user expects. Can be ``inf`` for streaming data.
* Implement ``_get_data(index)`` to return an array and a meta-data dict.
* Implement ``_get_meta_data(index)`` to return a meta-data dict. If index
is None, it should return the 'global' meta-data.
For the Format.Writer class:
* Implement ``_open(**kwargs)`` to initialize the writer. Deal with the
user-provided keyword arguments here.
* Implement ``_close()`` to clean up.
* Implement ``_append_data(im, meta)`` to add data (and meta-data).
* Implement ``_set_meta_data(meta)`` to set the global meta-data.
"""
# First import plugins that we want to take precedence over freeimage
from . import tifffile
from . import pillow
from . import grab
from . import freeimage
from . import freeimagemulti
from . import ffmpeg
from . import avbin
from . import dicom
from . import npz
from . import swf
from . import feisem # special kind of tiff, uses _tiffile
from . import fits # depends on astropy
from . import simpleitk # depends on SimpleITK
from . import gdal # depends on gdal
from . import example
# Sort
import os
from .. import formats
formats.sort(*os.getenv('IMAGEIO_FORMAT_ORDER', '').split(','))
del os, formats
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/imageio/plugins/__init__.py
|
Python
|
gpl-3.0
| 3,684
|
[
"ASE"
] |
fa30f3621c12de4e255d946517a98d37bbb9d19cea0001db3c38e07f51af5fee
|
"""
Helper functions for the course complete event that was originally included with the Badging MVP.
"""
import hashlib
import logging
from django.core.urlresolvers import reverse
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from badges.models import BadgeAssertion, BadgeClass, CourseCompleteImageConfiguration
from badges.utils import requires_badges_enabled, site_prefix
from xmodule.modulestore.django import modulestore
LOGGER = logging.getLogger(__name__)
# NOTE: As these functions are carry-overs from the initial badging implementation, they are used in
# migrations. Please check the badge migrations when changing any of these functions.
def course_slug(course_key, mode):
"""
Legacy: Not to be used as a model for constructing badge slugs. Included for compatibility with the original badge
type, awarded on course completion.
Slug ought to be deterministic and limited in size so it's not too big for Badgr.
Badgr's max slug length is 255.
"""
# Seven digits should be enough to realistically avoid collisions. That's what git services use.
digest = hashlib.sha256(u"{}{}".format(unicode(course_key), unicode(mode))).hexdigest()[:7]
base_slug = slugify(unicode(course_key) + u'_{}_'.format(mode))[:248]
return base_slug + digest
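# Usage sketch (hypothetical course key and mode; shown for shape only):
#   course_slug(course_key, 'honor')
# returns the slugified key plus '_honor_' plus a 7-character hex digest,
# deterministic for a given (course_key, mode) pair and under 255 chars.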
def badge_description(course, mode):
"""
Returns a description for the earned badge.
"""
if course.end:
return _(u'Completed the course "{course_name}" ({course_mode}, {start_date} - {end_date})').format(
start_date=course.start.date(),
end_date=course.end.date(),
course_name=course.display_name,
course_mode=mode,
)
else:
return _(u'Completed the course "{course_name}" ({course_mode})').format(
course_name=course.display_name,
course_mode=mode,
)
def evidence_url(user_id, course_key):
"""
Generates a URL to the user's Certificate HTML view, along with a GET variable that will signal the evidence visit
event.
"""
return site_prefix() + reverse(
'certificates:html_view', kwargs={'user_id': user_id, 'course_id': unicode(course_key)}) + '?evidence_visit=1'
def criteria(course_key):
"""
Constructs the 'criteria' URL from the course about page.
"""
about_path = reverse('about_course', kwargs={'course_id': unicode(course_key)})
return u'{}{}'.format(site_prefix(), about_path)
def get_completion_badge(course_id, user):
"""
Given a course key and a user, find the user's enrollment mode
and get the Course Completion badge.
"""
from student.models import CourseEnrollment
    enrollments = CourseEnrollment.objects.filter(
        user=user, course_id=course_id
    ).order_by('-is_active')
    if not enrollments:
        return None
    mode = enrollments[0].mode
course = modulestore().get_course(course_id)
if not course.issue_badges:
return None
return BadgeClass.get_badge_class(
criteria=criteria(course_id),
description=badge_description(course, mode),
course_id=course_id,
mode=mode,
display_name=course.display_name,
image_file_handle=CourseCompleteImageConfiguration.image_for_mode(mode)
)
@requires_badges_enabled
def course_badge_check(user, course_key):
"""
    Takes a user and a course key, and checks to see if a completion badge
    has been awarded for this course, awarding it if not, should conditions be right.
"""
if not modulestore().get_course(course_key).issue_badges:
LOGGER.info("Course is not configured to issue badges.")
return
badge_class = get_completion_badge(course_key, user)
if not badge_class:
# We're not configured to make a badge for this course mode.
return
if BadgeAssertion.objects.filter(user=user, badge_class=badge_class):
LOGGER.info("Completion badge already exists for this user on this course.")
# Badge already exists. Skip.
return
evidence = evidence_url(user.id, course_key)
badge_class.award(user, evidence_url=evidence)
|
gymnasium/edx-platform
|
lms/djangoapps/badges/events/course_complete.py
|
Python
|
agpl-3.0
| 4,179
|
[
"VisIt"
] |
632b530f1f3cb0e7fc7ed057418dc6781df1ed579712d368fbdb58caf015c97d
|
""" UserProfileDB class is a front-end to the User Profile Database
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import six
import os
import sys
import hashlib
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import Time
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Base.DB import DB
class UserProfileDB(DB):
""" UserProfileDB class is a front-end to the User Profile Database
"""
tableDict = {'up_Users': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'UserName': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'U': ['UserName']},
'Engine': 'InnoDB',
},
'up_Groups': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'UserGroup': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'G': ['UserGroup']},
'Engine': 'InnoDB',
},
'up_VOs': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'VO': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'VO': ['VO']},
'Engine': 'InnoDB',
},
'up_ProfilesData': {'Fields': {'UserId': 'INTEGER',
'GroupId': 'INTEGER',
'VOId': 'INTEGER',
'Profile': 'VARCHAR(255) NOT NULL',
'VarName': 'VARCHAR(255) NOT NULL',
'Data': 'BLOB',
'ReadAccess': 'VARCHAR(10) DEFAULT "USER"',
'PublishAccess': 'VARCHAR(10) DEFAULT "USER"',
},
'PrimaryKey': ['UserId', 'GroupId', 'Profile', 'VarName'],
'Indexes': {'ProfileKey': ['UserId', 'GroupId', 'Profile'],
'UserKey': ['UserId'],
},
'Engine': 'InnoDB',
},
'up_HashTags': {'Fields': {'UserId': 'INTEGER',
'GroupId': 'INTEGER',
'VOId': 'INTEGER',
'HashTag': 'VARCHAR(32) NOT NULL',
'TagName': 'VARCHAR(255) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': ['UserId', 'GroupId', 'TagName'],
'Indexes': {'HashKey': ['UserId', 'HashTag']},
'Engine': 'InnoDB',
},
}
def __init__(self):
""" Constructor
"""
self.__permValues = ['USER', 'GROUP', 'VO', 'ALL']
self.__permAttrs = ['ReadAccess', 'PublishAccess']
DB.__init__(self, 'UserProfileDB', 'Framework/UserProfileDB')
retVal = self.__initializeDB()
if not retVal['OK']:
raise Exception("Can't create tables: %s" % retVal['Message'])
def _checkTable(self):
""" Make sure the tables are created
"""
return self.__initializeDB()
def __initializeDB(self):
"""
Create the tables
"""
retVal = self._query("show tables")
if not retVal['OK']:
return retVal
tablesInDB = [t[0] for t in retVal['Value']]
tablesD = {}
if 'up_Users' not in tablesInDB:
tablesD['up_Users'] = self.tableDict['up_Users']
if 'up_Groups' not in tablesInDB:
tablesD['up_Groups'] = self.tableDict['up_Groups']
if 'up_VOs' not in tablesInDB:
tablesD['up_VOs'] = self.tableDict['up_VOs']
if 'up_ProfilesData' not in tablesInDB:
tablesD['up_ProfilesData'] = self.tableDict['up_ProfilesData']
if 'up_HashTags' not in tablesInDB:
tablesD['up_HashTags'] = self.tableDict['up_HashTags']
return self._createTables(tablesD)
def __getUserId(self, userName, insertIfMissing=True):
return self.__getObjId(userName, 'UserName', 'up_Users', insertIfMissing)
def __getGroupId(self, groupName, insertIfMissing=True):
return self.__getObjId(groupName, 'UserGroup', 'up_Groups', insertIfMissing)
def __getVOId(self, voName, insertIfMissing=True):
return self.__getObjId(voName, 'VO', 'up_VOs', insertIfMissing)
def __getObjId(self, objValue, varName, tableName, insertIfMissing=True):
result = self.getFields(tableName, ['Id'], {varName: objValue})
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
objId = data[0][0]
self.updateFields(tableName, ['LastAccess'], ['UTC_TIMESTAMP()'], {'Id': objId})
return S_OK(objId)
if not insertIfMissing:
return S_ERROR("No entry %s for %s defined in the DB" % (objValue, varName))
result = self.insertFields(tableName, [varName, 'LastAccess'], [objValue, 'UTC_TIMESTAMP()'])
if not result['OK']:
return result
return S_OK(result['lastRowId'])
def getUserGroupIds(self, userName, userGroup, insertIfMissing=True):
result = self.__getUserId(userName, insertIfMissing)
if not result['OK']:
return result
userId = result['Value']
result = self.__getGroupId(userGroup, insertIfMissing)
if not result['OK']:
return result
groupId = result['Value']
userVO = Registry.getVOForGroup(userGroup)
if not userVO:
userVO = "undefined"
result = self.__getVOId(userVO, insertIfMissing)
if not result['OK']:
return result
voId = result['Value']
return S_OK((userId, groupId, voId))
def deleteUserProfile(self, userName, userGroup=False):
"""
Delete the profiles for a user
"""
result = self.__getUserId(userName)
if not result['OK']:
return result
userId = result['Value']
condDict = {'UserId': userId}
if userGroup:
result = self.__getGroupId(userGroup)
if not result['OK']:
return result
groupId = result['Value']
condDict['GroupId'] = groupId
result = self.deleteEntries('up_ProfilesData', condDict)
if not result['OK'] or not userGroup:
return result
return self.deleteEntries('up_Users', {'Id': userId})
def __webProfileUserDataCond(self, userIds, sqlProfileName=False, sqlVarName=False):
condSQL = ['`up_ProfilesData`.UserId=%s' % userIds[0],
'`up_ProfilesData`.GroupId=%s' % userIds[1],
'`up_ProfilesData`.VOId=%s' % userIds[2]]
if sqlProfileName:
condSQL.append('`up_ProfilesData`.Profile=%s' % sqlProfileName)
if sqlVarName:
condSQL.append('`up_ProfilesData`.VarName=%s' % sqlVarName)
return " AND ".join(condSQL)
def __webProfileReadAccessDataCond(self, userIds, ownerIds, sqlProfileName, sqlVarName=False, match=False):
permCondSQL = []
sqlCond = []
if match:
sqlCond.append('`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' % (ownerIds[0], ownerIds[1]))
else:
permCondSQL.append(
'`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' %
(ownerIds[0], ownerIds[1]))
permCondSQL.append('`up_ProfilesData`.GroupId=%s AND `up_ProfilesData`.ReadAccess="GROUP"' % userIds[1])
permCondSQL.append('`up_ProfilesData`.VOId=%s AND `up_ProfilesData`.ReadAccess="VO"' % userIds[2])
permCondSQL.append('`up_ProfilesData`.ReadAccess="ALL"')
sqlCond.append('`up_ProfilesData`.Profile = %s' % sqlProfileName)
if sqlVarName:
sqlCond.append("`up_ProfilesData`.VarName = %s" % (sqlVarName))
# Perms
sqlCond.append("( ( %s ) )" % " ) OR ( ".join(permCondSQL))
return " AND ".join(sqlCond)
def __parsePerms(self, perms, addMissing=True):
normPerms = {}
for pName in self.__permAttrs:
if not perms or pName not in perms:
if addMissing:
normPerms[pName] = self.__permValues[0]
continue
else:
permVal = perms[pName].upper()
for nV in self.__permValues:
if nV == permVal:
normPerms[pName] = nV
break
if pName not in normPerms and addMissing:
normPerms[pName] = self.__permValues[0]
return normPerms
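  # Example (sketch): __parsePerms({'ReadAccess': 'group'}) returns
  # {'ReadAccess': 'GROUP', 'PublishAccess': 'USER'}: values are upper-cased
  # and any missing attribute defaults to the most restrictive value 'USER'.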
def retrieveVarById(self, userIds, ownerIds, profileName, varName):
"""
Get a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlCond = self.__webProfileReadAccessDataCond(userIds, ownerIds, sqlProfileName, sqlVarName, True)
    # when retrieving a profile entry we must take the requesting user's permissions into account.
selectSQL = "SELECT data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
return S_OK(data[0][0])
return S_ERROR("No data for userIds %s profileName %s varName %s" % (userIds, profileName, varName))
def retrieveAllUserVarsById(self, userIds, profileName):
"""
Get a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
sqlCond = self.__webProfileUserDataCond(userIds, sqlProfileName)
selectSQL = "SELECT varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
return S_OK(dict(data))
def retrieveUserProfilesById(self, userIds):
"""
Get all profiles and data for a user
"""
sqlCond = self.__webProfileUserDataCond(userIds)
selectSQL = "SELECT Profile, varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
dataDict = {}
for row in data:
if row[0] not in dataDict:
dataDict[row[0]] = {}
dataDict[row[0]][row[1]] = row[2]
return S_OK(dataDict)
def retrieveVarPermsById(self, userIds, ownerIds, profileName, varName):
"""
Get a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlCond = self.__webProfileReadAccessDataCond(userIds, ownerIds, sqlProfileName, sqlVarName)
selectSQL = "SELECT %s FROM `up_ProfilesData` WHERE %s" % (", ".join(self.__permAttrs), sqlCond)
result = self._query(selectSQL)
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
permDict = {}
for i in range(len(self.__permAttrs)):
permDict[self.__permAttrs[i]] = data[0][i]
return S_OK(permDict)
return S_ERROR("No data for userIds %s profileName %s varName %s" % (userIds, profileName, varName))
def deleteVarByUserId(self, userIds, profileName, varName):
"""
Remove a data entry for a profile
"""
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlCond = self.__webProfileUserDataCond(userIds, sqlProfileName, sqlVarName)
selectSQL = "DELETE FROM `up_ProfilesData` WHERE %s" % sqlCond
return self._update(selectSQL)
def storeVarByUserId(self, userIds, profileName, varName, data, perms):
"""
Set a data entry for a profile
"""
sqlInsertValues = []
sqlInsertKeys = []
sqlInsertKeys.append(('UserId', userIds[0]))
sqlInsertKeys.append(('GroupId', userIds[1]))
sqlInsertKeys.append(('VOId', userIds[2]))
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
sqlInsertKeys.append(('Profile', sqlProfileName))
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
sqlInsertKeys.append(('VarName', sqlVarName))
result = self._escapeString(data)
if not result['OK']:
return result
sqlInsertValues.append(('Data', result['Value']))
normPerms = self.__parsePerms(perms)
for k in normPerms:
sqlInsertValues.append((k, '"%s"' % normPerms[k]))
sqlInsert = sqlInsertKeys + sqlInsertValues
insertSQL = "INSERT INTO `up_ProfilesData` ( %s ) VALUES ( %s )" % (", ".join([f[0] for f in sqlInsert]),
", ".join([str(f[1]) for f in sqlInsert]))
result = self._update(insertSQL)
if result['OK']:
return result
# If error and not duplicate -> real error
if result['Message'].find("Duplicate entry") == -1:
return result
updateSQL = "UPDATE `up_ProfilesData` SET %s WHERE %s" % (", ".join(["%s=%s" % f for f in sqlInsertValues]),
self.__webProfileUserDataCond(userIds,
sqlProfileName,
sqlVarName))
return self._update(updateSQL)
def setUserVarPermsById(self, userIds, profileName, varName, perms):
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
result = self._escapeString(varName)
if not result['OK']:
return result
sqlVarName = result['Value']
nPerms = self.__parsePerms(perms, False)
if not nPerms:
return S_OK()
sqlPerms = ",".join(["%s='%s'" % (k, nPerms[k]) for k in nPerms])
updateSql = "UPDATE `up_ProfilesData` SET %s WHERE %s" % (sqlPerms,
self.__webProfileUserDataCond(userIds,
sqlProfileName,
sqlVarName))
return self._update(updateSql)
def retrieveVar(self, userName, userGroup, ownerName, ownerGroup, profileName, varName):
"""
Get a data entry for a profile
"""
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
result = self.getUserGroupIds(ownerName, ownerGroup)
if not result['OK']:
return result
ownerIds = result['Value']
return self.retrieveVarById(userIds, ownerIds, profileName, varName)
def retrieveUserProfiles(self, userName, userGroup):
"""
Helper for getting data
"""
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.retrieveUserProfilesById(userIds)
def retrieveAllUserVars(self, userName, userGroup, profileName):
"""
Helper for getting data
"""
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.retrieveAllUserVarsById(userIds, profileName)
def retrieveVarPerms(self, userName, userGroup, ownerName, ownerGroup, profileName, varName):
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
result = self.getUserGroupIds(ownerName, ownerGroup, False)
if not result['OK']:
return result
ownerIds = result['Value']
return self.retrieveVarPermsById(userIds, ownerIds, profileName, varName)
def setUserVarPerms(self, userName, userGroup, profileName, varName, perms):
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.setUserVarPermsById(userIds, profileName, varName, perms)
def storeVar(self, userName, userGroup, profileName, varName, data, perms=None):
"""
Helper for setting data
"""
    result = self.getUserGroupIds(userName, userGroup)
    if not result['OK']:
      return result
    userIds = result['Value']
    return self.storeVarByUserId(userIds, profileName, varName, data, perms=perms)
def deleteVar(self, userName, userGroup, profileName, varName):
"""
Helper for deleting data
"""
    result = self.getUserGroupIds(userName, userGroup)
    if not result['OK']:
      return result
    userIds = result['Value']
    return self.deleteVarByUserId(userIds, profileName, varName)
def __profilesCondGenerator(self, value, varType, initialValue=False):
if isinstance(value, six.string_types):
value = [value]
ids = []
if initialValue:
ids.append(initialValue)
for val in value:
if varType == 'user':
result = self.__getUserId(val, insertIfMissing=False)
elif varType == 'group':
result = self.__getGroupId(val, insertIfMissing=False)
else:
result = self.__getVOId(val, insertIfMissing=False)
if not result['OK']:
continue
ids.append(result['Value'])
if varType == 'user':
fieldName = 'UserId'
elif varType == 'group':
fieldName = 'GroupId'
else:
fieldName = 'VOId'
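    # e.g. varType='user' with two resolvable names yields (illustrative):
    #   "`up_ProfilesData`.UserId in ( 1, 2 )"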
return "`up_ProfilesData`.%s in ( %s )" % (fieldName, ", ".join([str(iD) for iD in ids]))
def listVarsById(self, userIds, profileName, filterDict=None):
result = self._escapeString(profileName)
if not result['OK']:
return result
sqlProfileName = result['Value']
sqlCond = ["`up_Users`.Id = `up_ProfilesData`.UserId",
"`up_Groups`.Id = `up_ProfilesData`.GroupId",
"`up_VOs`.Id = `up_ProfilesData`.VOId",
self.__webProfileReadAccessDataCond(userIds, userIds, sqlProfileName)]
if filterDict:
fD = {}
for k in filterDict:
fD[k.lower()] = filterDict[k]
filterDict = fD
for k in ('user', 'group', 'vo'):
if k in filterDict:
sqlCond.append(self.__profilesCondGenerator(filterDict[k], k))
sqlVars2Get = ["`up_Users`.UserName", "`up_Groups`.UserGroup", "`up_VOs`.VO", "`up_ProfilesData`.VarName"]
sqlQuery = "SELECT %s FROM `up_Users`, `up_Groups`, `up_VOs`, `up_ProfilesData` WHERE %s" % (", ".join(sqlVars2Get),
" AND ".join(sqlCond))
return self._query(sqlQuery)
def listVars(self, userName, userGroup, profileName, filterDict=None):
result = self.getUserGroupIds(userName, userGroup)
if not result['OK']:
return result
userIds = result['Value']
return self.listVarsById(userIds, profileName, filterDict)
def storeHashTagById(self, userIds, tagName, hashTag=False):
"""
Set a data entry for a profile
"""
if not hashTag:
hashTag = hashlib.md5()
hashTag.update(("%s;%s;%s" % (Time.dateTime(), userIds, tagName)).encode())
hashTag = hashTag.hexdigest()
result = self.insertFields('up_HashTags', ['UserId', 'GroupId', 'VOId', 'TagName', 'HashTag'],
[userIds[0], userIds[1], userIds[2], tagName, hashTag])
if result['OK']:
return S_OK(hashTag)
# If error and not duplicate -> real error
if result['Message'].find("Duplicate entry") == -1:
return result
result = self.updateFields('up_HashTags', ['HashTag'], [hashTag], {'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2],
'TagName': tagName})
if not result['OK']:
return result
return S_OK(hashTag)
def retrieveHashTagById(self, userIds, hashTag):
"""
Get a data entry for a profile
"""
result = self.getFields('up_HashTags', ['TagName'], {'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2],
'HashTag': hashTag})
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
return S_OK(data[0][0])
return S_ERROR("No data for combo userId %s hashTag %s" % (userIds, hashTag))
def retrieveAllHashTagsById(self, userIds):
"""
Get a data entry for a profile
"""
result = self.getFields('up_HashTags', ['HashTag', 'TagName'], {'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2]})
if not result['OK']:
return result
data = result['Value']
return S_OK(dict(data))
def storeHashTag(self, userName, userGroup, tagName, hashTag=False):
"""
Helper for storing HASH
"""
    result = self.getUserGroupIds(userName, userGroup)
    if not result['OK']:
      return result
    userIds = result['Value']
    return self.storeHashTagById(userIds, tagName, hashTag)
def retrieveHashTag(self, userName, userGroup, hashTag):
"""
Helper for retrieving HASH
"""
    result = self.getUserGroupIds(userName, userGroup)
    if not result['OK']:
      return result
    userIds = result['Value']
    return self.retrieveHashTagById(userIds, hashTag)
def retrieveAllHashTags(self, userName, userGroup):
"""
Helper for retrieving HASH
"""
    result = self.getUserGroupIds(userName, userGroup)
    if not result['OK']:
      return result
    userIds = result['Value']
    return self.retrieveAllHashTagsById(userIds)
def getUserProfileNames(self, permission):
"""
it returns the available profile names by not taking account the permission: ReadAccess and PublishAccess
"""
result = None
permissions = self.__parsePerms(permission, False)
if not permissions:
return S_OK()
condition = ",".join(["%s='%s'" % (k, permissions[k]) for k in permissions])
query = "SELECT distinct Profile from `up_ProfilesData` where %s" % condition
retVal = self._query(query)
if retVal['OK']:
result = S_OK([i[0] for i in retVal['Value']])
else:
result = retVal
return result
def testUserProfileDB():
""" Some test cases
"""
# building up some fake CS values
gConfig.setOptionValue('DIRAC/Setup', 'Test')
gConfig.setOptionValue('/DIRAC/Setups/Test/Framework', 'Test')
host = '127.0.0.1'
user = 'Dirac'
pwd = 'Dirac'
db = 'AccountingDB'
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/Host', host)
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/DBName', db)
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/User', user)
gConfig.setOptionValue('/Systems/Framework/Test/Databases/UserProfileDB/Password', pwd)
db = UserProfileDB()
assert db._connect()['OK']
userName = 'testUser'
userGroup = 'testGroup'
profileName = 'testProfile'
varName = 'testVar'
tagName = 'testTag'
hashTag = '237cadc4af90277e9524e6386e264630'
data = 'testData'
perms = 'USER'
try:
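    # Optionally wipe the existing tables first (disabled by default)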
if False:
for tableName in db.tableDict.keys():
result = db._update('DROP TABLE `%s`' % tableName)
assert result['OK']
gLogger.info('\n Creating Table\n')
# Make sure it is there and it has been created for this test
result = db._checkTable()
assert result == {'OK': True, 'Value': None}
result = db._checkTable()
assert result == {'OK': True, 'Value': 0}
gLogger.info('\n Adding some data\n')
result = db.storeVar(userName, userGroup, profileName, varName, data, perms)
assert result['OK']
assert result['Value'] == 1
gLogger.info('\n Some queries\n')
result = db.getUserGroupIds(userName, userGroup)
assert result['OK']
assert result['Value'] == (1, 1, 1)
result = db.listVars(userName, userGroup, profileName)
assert result['OK']
assert result['Value'][0][3] == varName
result = db.retrieveUserProfiles(userName, userGroup)
assert result['OK']
assert result['Value'] == {profileName: {varName: data}}
result = db.storeHashTag(userName, userGroup, tagName, hashTag)
assert result['OK']
assert result['Value'] == hashTag
result = db.retrieveAllHashTags(userName, userGroup)
assert result['OK']
assert result['Value'] == {hashTag: tagName}
result = db.retrieveHashTag(userName, userGroup, hashTag)
assert result['OK']
assert result['Value'] == tagName
gLogger.info('\n OK\n')
except AssertionError:
print('ERROR ', end=' ')
if not result['OK']:
print(result['Message'])
else:
print(result)
sys.exit(1)
if __name__ == '__main__':
from DIRAC.Core.Base import Script
Script.parseCommandLine()
gLogger.setLevel('VERBOSE')
if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
    gLogger.info('Unset python optimization "PYTHONOPTIMIZE" to run these tests (assert statements are disabled)')
sys.exit(0)
testUserProfileDB()
|
yujikato/DIRAC
|
src/DIRAC/FrameworkSystem/DB/UserProfileDB.py
|
Python
|
gpl-3.0
| 26,624
|
[
"DIRAC"
] |
780c6b26b01549b22e4c5e0c13daf8c83119fe16b83cee276998dc874ab3fed7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pymatgen import Structure
from pymatgen.io.feff.sets import MPXANESSet, MPELNESSet, FEFFDictSet, MPEXAFSSet
from pymatgen.io.feff.inputs import Potential, Tags, Atoms, Header
from pymatgen.io.cif import CifParser, CifFile
import shutil
import numpy as np
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class FeffInputSetTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.333333 0.666667 0.503676
* 2 Co 0.666667 0.333333 0.003676
* 3 O 0.333333 0.666667 0.121324
* 4 O 0.666667 0.333333 0.621325"""
cif_file = os.path.join(test_dir, 'CoO19128.cif')
cls.structure = CifParser(cif_file).get_structures()[0]
cls.absorbing_atom = 'O'
cls.mp_xanes = MPXANESSet(cls.absorbing_atom, cls.structure)
def test_get_header(self):
comment = 'From cif file'
header = str(self.mp_xanes.header(source='CoO19128.cif', comment=comment))
print(header)
ref = self.header_string.splitlines()
last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
for i, l in enumerate(header.splitlines()):
if i < 9:
self.assertEqual(l, ref[i])
else:
s = " ".join(l.split()[2:])
self.assertIn(s, last4)
def test_getfefftags(self):
tags = self.mp_xanes.tags.as_dict()
self.assertEqual(tags['COREHOLE'], "FSR",
"Failed to generate PARAMETERS string")
def test_get_feffPot(self):
POT = str(self.mp_xanes.potential)
d, dr = Potential.pot_dict_from_string(POT)
self.assertEqual(d['Co'], 1, "Wrong symbols read in for Potential")
def test_get_feff_atoms(self):
atoms = str(self.mp_xanes.atoms)
self.assertEqual(atoms.splitlines()[3].split()[4], self.absorbing_atom,
"failed to create ATOMS string")
def test_to_and_from_dict(self):
f1_dict = self.mp_xanes.as_dict()
f2 = MPXANESSet.from_dict(f1_dict)
self.assertEqual(f1_dict, f2.as_dict())
def test_user_tag_settings(self):
tags_dict_ans = self.mp_xanes.tags.as_dict()
tags_dict_ans["COREHOLE"] = "RPA"
tags_dict_ans["EDGE"] = "L1"
user_tag_settings = {"COREHOLE": "RPA", "EDGE": "L1"}
mp_xanes_2 = MPXANESSet(self.absorbing_atom, self.structure,
user_tag_settings=user_tag_settings)
self.assertEqual(mp_xanes_2.tags.as_dict(), tags_dict_ans)
def test_eels_to_from_dict(self):
elnes = MPELNESSet(self.absorbing_atom, self.structure, radius=5.0,
beam_energy=100, beam_direction=[1, 0, 0],
collection_angle=7, convergence_angle=6)
elnes_dict = elnes.as_dict()
elnes_2 = MPELNESSet.from_dict(elnes_dict)
self.assertEqual(elnes_dict, elnes_2.as_dict())
def test_eels_tags_set(self):
radius = 5.0
user_eels_settings = {
'ENERGY': '4 0.04 0.1',
'BEAM_ENERGY': '200 1 0 1',
'ANGLES': '2 3'}
elnes = MPELNESSet(self.absorbing_atom, self.structure, radius=radius,
user_eels_settings=user_eels_settings)
elnes_2 = MPELNESSet(self.absorbing_atom, self.structure, radius=radius,
beam_energy=100, beam_direction=[1, 0, 0],
collection_angle=7, convergence_angle=6)
self.assertEqual(elnes.tags["ELNES"]["ENERGY"],
user_eels_settings["ENERGY"])
self.assertEqual(elnes.tags["ELNES"]["BEAM_ENERGY"],
user_eels_settings["BEAM_ENERGY"])
self.assertEqual(elnes.tags["ELNES"]["ANGLES"],
user_eels_settings["ANGLES"])
self.assertEqual(elnes_2.tags["ELNES"]["BEAM_ENERGY"], [100, 0, 1, 1])
self.assertEqual(elnes_2.tags["ELNES"]["BEAM_DIRECTION"], [1, 0, 0])
self.assertEqual(elnes_2.tags["ELNES"]["ANGLES"], [7, 6])
def test_reciprocal_tags_and_input(self):
user_tag_settings = {"RECIPROCAL": "", "KMESH": "1000"}
elnes = MPELNESSet(self.absorbing_atom, self.structure,
user_tag_settings=user_tag_settings)
self.assertTrue("RECIPROCAL" in elnes.tags)
self.assertEqual(elnes.tags["TARGET"], 3)
self.assertEqual(elnes.tags["KMESH"], "1000")
self.assertEqual(elnes.tags["CIF"], "Co2O2.cif")
self.assertEqual(elnes.tags["COREHOLE"], "RPA")
all_input = elnes.all_input()
self.assertNotIn("ATOMS", all_input)
self.assertNotIn("POTENTIALS", all_input)
elnes.write_input()
structure = Structure.from_file("Co2O2.cif")
self.assertTrue(self.structure.matches(structure))
os.remove("HEADER")
os.remove("PARAMETERS")
os.remove("feff.inp")
os.remove("Co2O2.cif")
def test_small_system_EXAFS(self):
exafs_settings = MPEXAFSSet(self.absorbing_atom, self.structure)
self.assertFalse(exafs_settings.small_system)
self.assertTrue('RECIPROCAL' not in exafs_settings.tags)
user_tag_settings = {"RECIPROCAL": ""}
exafs_settings_2 = MPEXAFSSet(self.absorbing_atom, self.structure, nkpts=1000,
user_tag_settings=user_tag_settings)
self.assertFalse(exafs_settings_2.small_system)
self.assertTrue('RECIPROCAL' not in exafs_settings_2.tags)
def test_number_of_kpoints(self):
user_tag_settings = {"RECIPROCAL": ""}
elnes = MPELNESSet(self.absorbing_atom, self.structure, nkpts=1000,
user_tag_settings=user_tag_settings)
self.assertEqual(elnes.tags["KMESH"], [12, 12, 7])
def test_large_systems(self):
struct = Structure.from_file(os.path.join(test_dir, "La4Fe4O12.cif"))
user_tag_settings = {"RECIPROCAL": "", "KMESH": "1000"}
elnes = MPELNESSet("Fe", struct, user_tag_settings=user_tag_settings)
self.assertNotIn("RECIPROCAL", elnes.tags)
self.assertNotIn("KMESH", elnes.tags)
self.assertNotIn("CIF", elnes.tags)
self.assertNotIn("TARGET", elnes.tags)
def test_postfeffset(self):
self.mp_xanes.write_input(os.path.join('.', 'xanes_3'))
feff_dict_input = FEFFDictSet.from_directory(os.path.join('.', 'xanes_3'))
self.assertTrue(feff_dict_input.tags == Tags.from_file(os.path.join('.', 'xanes_3/feff.inp')))
self.assertTrue(str(feff_dict_input.header()) == str(Header.from_file(os.path.join('.', 'xanes_3/HEADER'))))
feff_dict_input.write_input('xanes_3_regen')
origin_tags = Tags.from_file(os.path.join('.', 'xanes_3/PARAMETERS'))
output_tags = Tags.from_file(os.path.join('.', 'xanes_3_regen/PARAMETERS'))
origin_mole = Atoms.cluster_from_file(os.path.join('.', 'xanes_3/feff.inp'))
output_mole = Atoms.cluster_from_file(os.path.join('.', 'xanes_3_regen/feff.inp'))
original_mole_dist = np.array(origin_mole.distance_matrix[0, :]).astype(np.float64)
output_mole_dist = np.array(output_mole.distance_matrix[0, :]).astype(np.float64)
original_mole_shell = [x.species_string for x in origin_mole]
output_mole_shell = [x.species_string for x in output_mole]
self.assertTrue(np.allclose(original_mole_dist, output_mole_dist))
self.assertTrue(origin_tags == output_tags)
self.assertTrue(original_mole_shell == output_mole_shell)
shutil.rmtree(os.path.join('.', 'xanes_3'))
shutil.rmtree(os.path.join('.', 'xanes_3_regen'))
reci_mp_xanes = MPXANESSet(self.absorbing_atom, self.structure,
user_tag_settings={"RECIPROCAL": ""})
reci_mp_xanes.write_input('xanes_reci')
feff_reci_input = FEFFDictSet.from_directory(os.path.join('.', 'xanes_reci'))
self.assertTrue("RECIPROCAL" in feff_reci_input.tags)
feff_reci_input.write_input('Dup_reci')
self.assertTrue(os.path.exists(os.path.join('.', 'Dup_reci', 'HEADER')))
self.assertTrue(os.path.exists(os.path.join('.', 'Dup_reci', 'feff.inp')))
self.assertTrue(os.path.exists(os.path.join('.', 'Dup_reci', 'PARAMETERS')))
self.assertFalse(os.path.exists(os.path.join('.', 'Dup_reci', 'ATOMS')))
self.assertFalse(os.path.exists(os.path.join('.', 'Dup_reci', 'POTENTIALS')))
tags_original = Tags.from_file(os.path.join('.', 'xanes_reci/feff.inp'))
tags_output = Tags.from_file(os.path.join('.', 'Dup_reci/feff.inp'))
self.assertTrue(tags_original == tags_output)
stru_orig = Structure.from_file(os.path.join('.', 'xanes_reci/Co2O2.cif'))
stru_reci = Structure.from_file(os.path.join('.', 'Dup_reci/Co2O2.cif'))
self.assertTrue(stru_orig.__eq__(stru_reci))
shutil.rmtree(os.path.join('.', 'Dup_reci'))
shutil.rmtree(os.path.join('.', 'xanes_reci'))
def test_post_distdiff(self):
feff_dict_input = FEFFDictSet.from_directory(os.path.join(test_dir, 'feff_dist_test'))
self.assertTrue(feff_dict_input.tags == Tags.from_file(os.path.join(test_dir, 'feff_dist_test/feff.inp')))
self.assertTrue(
str(feff_dict_input.header()) == str(Header.from_file(os.path.join(test_dir, 'feff_dist_test/HEADER'))))
feff_dict_input.write_input('feff_dist_regen')
origin_tags = Tags.from_file(os.path.join(test_dir, 'feff_dist_test/PARAMETERS'))
output_tags = Tags.from_file(os.path.join('.', 'feff_dist_regen/PARAMETERS'))
origin_mole = Atoms.cluster_from_file(os.path.join(test_dir, 'feff_dist_test/feff.inp'))
output_mole = Atoms.cluster_from_file(os.path.join('.', 'feff_dist_regen/feff.inp'))
original_mole_dist = np.array(origin_mole.distance_matrix[0, :]).astype(np.float64)
output_mole_dist = np.array(output_mole.distance_matrix[0, :]).astype(np.float64)
original_mole_shell = [x.species_string for x in origin_mole]
output_mole_shell = [x.species_string for x in output_mole]
self.assertTrue(np.allclose(original_mole_dist, output_mole_dist))
self.assertTrue(origin_tags == output_tags)
self.assertTrue(original_mole_shell == output_mole_shell)
shutil.rmtree(os.path.join('.', 'feff_dist_regen'))
if __name__ == '__main__':
unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/io/feff/tests/test_sets.py
|
Python
|
mit
| 10,986
|
[
"FEFF",
"pymatgen"
] |
2d58f533f77a15c431aac71fafc99feedb7082a39345c0629793e06ccd49c51f
|
"""Make plots of monthly values or differences"""
from __future__ import print_function
import calendar
from pandas.io.sql import read_sql
import matplotlib.pyplot as plt
from pyiem.util import get_dbconn
PGCONN = get_dbconn("idep")
def get_scenario(scenario):
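    # Monthly state-wide averages for one DEP scenario (Iowa HUC12s,
    # 2008-2016). The unit conversions are assumptions based on DEP
    # conventions: runoff mm -> inches via /25.4; delivery/detachment
    # scaled by 4.463, consistent with kg/m^2 -> short tons per acre.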
df = read_sql(
"""
WITH yearly as (
SELECT huc_12, generate_series(2008, 2016) as yr
from huc12 where states = 'IA' and scenario = 0),
combos as (
SELECT huc_12, yr, generate_series(1, 12) as mo from yearly),
results as (
SELECT r.huc_12, extract(year from valid)::int as yr,
extract(month from valid)::int as mo,
sum(qc_precip) as precip, sum(avg_runoff) as runoff,
sum(avg_delivery) as delivery,
sum(avg_loss) as detachment from results_by_huc12 r
WHERE r.scenario = %s and r.valid >= '2008-01-01'
and r.valid < '2017-01-01' GROUP by r.huc_12, yr, mo),
agg as (
SELECT c.huc_12, c.yr, c.mo, coalesce(r.precip, 0) as precip,
coalesce(r.runoff, 0) as runoff,
coalesce(r.delivery, 0) as delivery,
coalesce(r.detachment, 0) as detachment
from combos c LEFT JOIN results r on (c.huc_12 = r.huc_12 and
c.yr = r.yr and c.mo = r.mo))
select mo,
avg(runoff) / 25.4 as runoff_in,
avg(delivery) * 4.463 as delivery_ta,
avg(detachment) * 4.463 as detachment_ta
from agg GROUP by mo ORDER by mo ASC
""",
PGCONN,
params=(scenario,),
index_col="mo",
)
return df
def main():
"""Go Main"""
adf = get_scenario(0)
b25 = get_scenario(25)
b26 = get_scenario(26)
delta25 = b25 - adf
delta26 = b26 - adf
(fig, ax) = plt.subplots(1, 1)
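    # Grouped bars: offset each series by half a bar width (0.2) around the
    # integer month index so the two scenarios sit side by side.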
ax.bar(
delta25.index.values - 0.2,
delta25["delivery_ta"].values,
width=0.4,
label="HI 0.8",
)
ax.bar(
delta26.index.values + 0.2,
delta26["delivery_ta"].values,
width=0.4,
label="HI 0.9",
)
ax.legend(loc="best")
ax.grid(True)
ax.set_title("2008-2016 Change in Delivery vs DEP Baseline")
ax.set_ylabel("Change [tons/acre]")
ax.set_xticks(range(1, 13))
ax.set_xticklabels(calendar.month_abbr[1:])
fig.savefig("test.png")
if __name__ == "__main__":
main()
|
akrherz/idep
|
scripts/biomass/monthly.py
|
Python
|
mit
| 2,391
|
[
"ADF"
] |
7005d32bb7b456e5ffe144f4c3317e65fc22974d967c5539c3b2d09e164f12f4
|
import sys, os, re, argparse, csv
import shutil
class WorkflowParser(object):
def __init__(self, batch_file=None):
self.bf = batch_file
self.read_batch_file()
def read_batch_file(self):
"""
Read lines from batch submit file.
"""
batch_file = self.bf
with open(batch_file) as f:
batch_lines = f.readlines()
self.batch = batch_lines
def get_params(self):
"""
Identify header line with parameter names; store the index and name of
each parameter.
"""
param_line = [l for l in self.batch if 'SampleName' in l][0]
param_dict = {idx: re.sub('##.*', '', p) \
for idx,p in enumerate(param_line.strip().split('\t'))}
self.pd = param_dict
def get_lib_params(self):
if not hasattr(self, 'pd'):
self.get_params()
param_dict = self.pd
lib_param_dict = [{param_dict[i]: p \
for i,p in enumerate(l.strip().split('\t'))} \
for l in self.batch if re.search('lib[0-9]+', l)]
self.lpd = lib_param_dict
def build_out_dict(self):
if not hasattr(self, 'lpd'):
self.get_lib_params()
lib_param_dict = self.lpd
out_file_dict = {pd['SampleName']: {re.sub('_out', '', k): pd[k] \
for k in pd if 'out' in k} \
for pd in lib_param_dict}
self.ofd = out_file_dict
def show_output_files(self):
if not hasattr(self, 'ofd'):
self.build_out_dict()
return self.ofd
class CompileController(object):
def __init__(self, flowcell_dir=None):
self.fc_dir = flowcell_dir
def select_batch(self):
batch_submit_dir = os.path.join(self.fc_dir, "globus_batch_submission")
batch_dates = list(set(f.split('_')[0] for f in os.listdir(batch_submit_dir)))
print "\nFound the following Globus Genomics Galaxy batches:"
for i, d in enumerate(batch_dates):
print "%3d : %s" % (i, d)
batch_i = raw_input("Select the date of flowcell batch to compile: ")
batch_date = batch_dates[int(batch_i)]
self.batch_submit_files = [os.path.join(batch_submit_dir, f)
for f in os.listdir(batch_submit_dir)
if f.split('_')[0] in batch_date]
def go(self):
if not hasattr(self, 'batch_submit_files'):
self.select_batch()
for f in self.batch_submit_files:
rc = ResultCurator(self.fc_dir, f).curate_outputs()
class ResultCurator(object):
def __init__(self, flowcell_dir=None, batch_submit_file=None):
self.fc_dir = flowcell_dir
self.submit_file = batch_submit_file
self.get_workflow()
def get_workflow(self):
batch_workflow = re.search('(?<=optimized_).*(?=.txt)',
self.submit_file).group()
self.workflow = batch_workflow
def get_outputs(self):
output_dict = WorkflowParser(self.submit_file).show_output_files()
self.od = output_dict
def get_project_dir(self, lib=None):
project_dir = os.path.join(self.fc_dir,
re.search('Project_[^/]*',
self.od[lib].get('workflow_log_txt')).group())
return project_dir
def curate_outputs(self):
if not hasattr(self, 'od'):
self.get_outputs()
for idx,lib in enumerate(self.od):
project_dir = self.get_project_dir(lib)
proj_id = re.search('P+[0-9]+(-[0-9]+){,1}', project_dir).group()
print "\nWorkflow: %s" % self.workflow
print (">> Compiling outputs for %s [%s] (%d of %d)\n" %
(lib, proj_id, idx + 1, len(self.od)))
sc = SampleCurator(lib, self.od[lib])
sc.organize_files(self.get_project_dir(lib))
class SampleCurator(object):
def __init__(self, lib_id=None, output_dict=None):
self.lib = lib_id
self.lod = output_dict
def get_result_type(self, output):
output_str = re.sub('_[a-z]+$', '', output)
output_type = re.search('(?<=_)[a-z]+$', output_str)
if output_type:
result_type = output_type.group()
else:
result_type = output_str
return result_type
def get_result_source(self, output):
if not re.search('fastq$', output):
result_sources = ['picard_align', 'picard_markdups', 'picard_rnaseq',
'htseq', 'trinity', 'tophat', 'tophat_stats', 'fastqc',
'workflow_log']
result_source = [ rs for rs in result_sources \
if re.search(rs.lower(), output) ][0]
else:
result_sources = ['fastq', 'trimmed_fastq']
result_source = [ rs for rs in result_sources \
if re.search('^' + rs.lower() + '$', output) ][0]
return result_source
def build_source_dict(self):
output_dict = self.lod
source_dict = {}
for o in output_dict:
rt = self.get_result_type(o)
rs = self.get_result_source(o)
if rs in source_dict:
source_dict[rs][o] = {'file': output_dict[o],
'type': rt}
else:
source_dict[rs] = {o: {'file': output_dict[o],
'type': rt}}
self.sd = source_dict
def organize_files(self, target_dir):
if not hasattr(self, 'sd'):
self.build_source_dict()
for rs in self.sd:
fm = FileMunger(self, target_dir, rs)
fm.go()
class FileMunger(object):
def __init__(self, sample_curator, target_dir, result_source):
self.lib = sample_curator.lib
self.start = target_dir
self.target = target_dir
self.rs = result_source
print " > Result source: %s" % self.rs
self.sod = sample_curator.sd[result_source]
self.prep_output_subdir()
def prep_output_subdir(self):
source_subdir_dict = {'fastqc': os.path.join(self.lib, 'qcR1'),
'picard_align': self.lib + '_qc',
'picard_markdups': self.lib + 'MarkDups',
'picard_rnaseq': self.lib + '_al',
'trinity': self.lib}
if self.rs in source_subdir_dict:
out_subdir = source_subdir_dict[self.rs]
else:
out_subdir = ''
self.subdir = out_subdir
def rename_files(self):
source_output_dict = self.sod
result_file_dict = {'trimmed_fastq': self.lib + '_trimmed.fastq',
'fastqc_qc_html': 'fastqc_report.html',
'fastqc_qc_txt': 'fastqc_data.txt',
'picard_align_metrics_html': 'Picard_Alignment_Summary_Metrics_html.html',
'picard_markdups_metrics_html': 'MarkDups_Dupes_Marked_html.html',
'trinity_fasta': 'Trinity.fasta',
'tophat_stats_metrics_txt': self.lib + 'ths.txt',
'picard_rnaseq_metrics_html': 'RNA_Seq_Metrics_html.html',
'htseq_counts_txt': self.lib + '_count.txt',
'tophat_alignments_bam': self.lib + '.bam',
'htseq_metrics_txt': self.lib + 'mm.txt',
'workflow_log_txt': self.lib + '_workflow_log.txt'}
type_subdir_dict = {'qc': 'QC',
'metrics': 'metrics',
'counts': 'counts',
'alignments': 'alignments',
'trimmed': 'TrimmedFastqs',
'trinity': 'Trinity',
'log': 'logs'}
dirs_to_bundle = []
for idx,o in enumerate(source_output_dict):
print (" (file %d of %d)" %
(idx + 1, len(source_output_dict)))
rf = source_output_dict[o]['file']
rt = source_output_dict[o]['type']
            if self.rs != 'fastq':
out_dir = os.path.join(self.target, type_subdir_dict[rt], self.subdir)
if not os.path.isdir(out_dir):
print " - Creating directory %s" % out_dir
os.makedirs(out_dir)
if len(self.subdir) and not self.rs == 'trinity':
dirs_to_bundle.append(out_dir)
src_file = os.path.join(self.start, rt, os.path.basename(rf))
target_file = os.path.join(out_dir, result_file_dict[o])
if os.path.exists(target_file):
print " - Target file %s already exists" % target_file
elif not os.path.exists(src_file):
print " - Source file %s not found" % src_file
else:
print " - Copying %s to %s" % (src_file, target_file)
shutil.move(src_file, target_file)
self.bundle = list(set(dirs_to_bundle))
def bundle_files(self):
for d in self.bundle:
print " - Zipping up %s" % d
shutil.make_archive(d, 'zip', d)
shutil.rmtree(d)
def go(self):
self.rename_files()
self.bundle_files()
def main(argv):
    flowcell_dir = argv[0]
    CompileController(flowcell_dir).go()
if __name__ == "__main__":
main(sys.argv[1:])
|
jaeddy/bripipetools
|
scripts/_deprecated/compile_globus_results.py
|
Python
|
mit
| 9,779
|
[
"Galaxy",
"HTSeq"
] |
bdc9b6e17358f4ae734617801eca5382451471401fd926a1de8f353d5cd82250
|
#!/usr/bin/pvpython
import paraview.simple
import os
import tempfile
import shutil
import sys
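# Batch-convert every file in SRC_DIR to a VTK structured grid (.vts):
# ParaView writes a multiblock .vtm (plus a per-block .vts file) into a
# temp dir, from which the single block is moved to DST_DIR.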
SRC_DIR = sys.argv[1]
DST_DIR = sys.argv[2]
if not os.path.isdir(DST_DIR):
os.makedirs(DST_DIR)
for filename in os.listdir(SRC_DIR):
    print(SRC_DIR + "/" + filename)
fileName, fileExtension = os.path.splitext(filename)
temp_path = tempfile.mkdtemp()
reader = paraview.simple.OpenDataFile(SRC_DIR + "/" + filename)
writer = paraview.simple.CreateWriter(temp_path + "/" + fileName + ".vtm", reader)
writer.UpdatePipeline()
shutil.move(temp_path + "/" + fileName + "/" + fileName + "_0_0.vts", DST_DIR + "/" + fileName + ".vts")
shutil.rmtree(temp_path)
|
alyupa/multiphase-flow-modeling
|
visualisation/conv.py
|
Python
|
mit
| 662
|
[
"ParaView"
] |
baa36f0524a45623c98199c61f48b1a68085f39f022f166e3668da7e24331748
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015 Steffen Deusch
# Licensed under the MIT license
# MonitorNjus, 28.11.2015 (Version 1.1)
import random  # import the random module
rancolor = ['red', 'indigo', 'blue', 'light-blue', 'cyan', 'teal',
'green', 'light-green', 'amber', 'orange', 'deep-orange', 'blue-grey']
color = random.choice(rancolor)  # pick a colour at random
if color == "red":  # map the colour name to its hex value
hexa = "#f44336"
elif color == "indigo":
hexa = "#3f51b5"
elif color == "blue":
hexa = "#2196f3"
elif color == "light-blue":
hexa = "#03a9f4"
elif color == "cyan":
hexa = "#00bcd4"
elif color == "teal":
hexa = "#009688"
elif color == "green":
hexa = "#4caf50"
elif color == "light-green":
hexa = "#8bc34a"
# elif color == "lime":
# hexa = "#cddc39"
elif color == "amber":
hexa = "#ffc107"
elif color == "orange":
hexa = "#ff9800"
elif color == "deep-orange":
hexa = "#ff5722"
# elif color == "grey":
# hexa = "#9e9e9e"
elif color == "blue-grey":
hexa = "#607d8b"
else:
hexa == "wtf"
adminstyles = """
<style type="text/css">
.input-field label {
opacity: 0;
}
.secondary-content, .input-field .prefix.active, .input-field input[type=text]:focus + label, .input-field input[type=password]:focus + label, .input-field input[type=email]:focus + label, .input-field input[type=url]:focus + label, .input-field input[type=date]:focus + label, .input-field input[type=tel]:focus + label, .input-field input[type=number]:focus + label, .input-field input[type=search]:focus + label, .input-field textarea:focus.materialize-textarea + label, .dropdown-content li > a, .dropdown-content li > span {
color: """ + hexa + """; opacity: 1;
}
.switch label input[type=checkbox]:first-child:checked + .lever {
background-color: """ + hexa + """; opacity: 1;
}
input[type=text], input[type=number] {
color: grey
}
input[type=text]:focus, input[type=password]:focus, input[type=email]:focus, input[type=url]:focus, input[type=date]:focus, input[type=tel]:focus, input[type=number]:focus, input[type=search]:focus, textarea:focus.materialize-textarea {
border-bottom: 1px solid """ + hexa + """;
-webkit-box-shadow: 0 1px 0 0 """ + hexa + """;
-moz-box-shadow: 0 1px 0 0 """ + hexa + """;
box-shadow: 0 1px 0 0 """ + hexa + """;
color: black;
}
input[type=range]::-webkit-slider-thumb {
background-color: """ + hexa + """;
}
input[type=range]::-moz-range-thumb {
background: """ + hexa + """;
}
input[type=range] + .thumb {
background-color: """ + hexa + """;
}
[type="checkbox"]:checked + label:before {
border-right: 2px solid """ + hexa + """;
border-bottom: 2px solid """ + hexa + """;
}
.btn:hover, .btn-large:hover { background-color: """ + hexa + """; opacity: 1; }
.btn, .btn-large, .btn-floating { background-color: """ + hexa + """; opacity: 0.8; }
</style>"""
|
SteffenDE/monitornjus
|
modules/code/colors.py
|
Python
|
mit
| 3,027
|
[
"Amber"
] |
060e4cf2f713459ef932b934ee09a35eeb9d5bc75499f73b6c9ffa05f6fe0560
|
#!/usr/bin/env python3
import os
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
#
# firefly imports
#
from version_info import VERSION_INFO
from firefly_common import *
from firefly_menu import create_menu
from firefly_starter import Firestarter
from dlg_system import SystemDialog
from mod_browser import Browser
from mod_preview import Preview
from mod_detail import Detail
from mod_rundown import Rundown
from mod_scheduler import Scheduler
#
# Application main window
#
class Firefly(QMainWindow):
def __init__(self, parent):
super(Firefly, self).__init__()
self.setWindowTitle("{}@{} - {}".format(config["rights"]["login"], config["site_name"], VERSION_INFO))
self.setWindowIcon(QIcon(":/images/firefly.ico"))
self.parent = parent
self.docks = []
create_menu(self)
self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
self.setDockNestingEnabled(True)
self.setStyleSheet(base_css)
settings = ffsettings()
self.on_change_channel(1) # todo: Load default from settings
self.workspace_locked = settings.value("main_window/locked", False)
for dock_key in settings.allKeys():
if not dock_key.startswith("docks/"):
continue
dock_data = settings.value(dock_key)
parent.splash_message("Loading {} {}".format(dock_data["class"], dock_data["object_name"]))
self.create_dock(dock_data["class"], state=dock_data, show=False)
if settings.contains("main_window/pos"):
self.move(settings.value("main_window/pos"))
if settings.contains("main_window/size"):
self.resize(settings.value("main_window/size"))
self.size_helper = self.size()
if settings.contains("main_window/state"):
self.restoreState(settings.value("main_window/state"))
if self.workspace_locked:
self.lock_workspace()
else:
self.unlock_workspace()
if not settings.contains("main_window/pos") or (settings.contains("main_window/maximized") and int(settings.value("main_window/maximized"))):
self.showMaximized()
else:
self.show()
for dock in self.docks:
dock.show()
self.subscribers = {}
self.seismic_timer = QTimer(self)
self.seismic_timer.timeout.connect(self.on_seismic_timer)
self.seismic_timer.start(40)
def resizeEvent(self, evt):
if not self.isMaximized():
self.size_helper = evt.size()
def create_dock(self, widget_class, state={}, show=True, one_instance=False):
widget, right = {
"browser" : [Browser, False],
"scheduler" : [Scheduler, "scheduler_view"],
"rundown" : [Rundown, "rundown_view"],
"preview" : [Preview, False],
"detail" : [Detail, False]
}[widget_class]
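        # The mapping pairs each dock class with the access right it needs;
        # False means the dock is available to everyone.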
if right and not has_right(right):
logging.warning("Not authorised to show {}".format(widget_class))
return
create = True
if one_instance:
for dock in self.docks:
if dock.class_ == widget_class:
if dock.class_ == "detail":
if dock.hasFocus():
dock.main_widget.switch_tabs()
else:
dock.main_widget.switch_tabs(0)
dock.raise_()
dock.setFocus()
return dock
# Create new dock
QApplication.processEvents()
QApplication.setOverrideCursor(Qt.WaitCursor)
self.docks.append(BaseDock(self, widget, state))
if self.workspace_locked:
self.docks[-1].setAllowedAreas(Qt.NoDockWidgetArea)
else:
self.docks[-1].setAllowedAreas(Qt.AllDockWidgetAreas)
if show:
self.docks[-1].setFloating(True)
self.docks[-1].show()
qr = self.docks[-1].frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.docks[-1].move(qr.topLeft())
# populate dock with asset data from cache
self.push_asset_data(self.docks[-1])
QApplication.restoreOverrideCursor()
return self.docks[-1]
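    # Locking pins the workspace: floating docks may no longer be docked
    # (NoDockWidgetArea) and docked ones get an empty title bar widget.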
def lock_workspace(self):
for dock in self.docks:
if dock.isFloating():
dock.setAllowedAreas(Qt.NoDockWidgetArea)
else:
dock.setTitleBarWidget(QWidget())
self.workspace_locked = True
def unlock_workspace(self):
wdgt = QDockWidget().titleBarWidget()
for dock in self.docks:
dock.setAllowedAreas(Qt.AllDockWidgetAreas)
if not dock.isFloating():
dock.setTitleBarWidget(wdgt)
self.workspace_locked = False
def closeEvent(self, event):
settings = ffsettings()
settings.remove("main_window")
settings.setValue("main_window/state", self.saveState())
settings.setValue("main_window/pos", self.pos())
settings.setValue("main_window/size", self.size_helper)
settings.setValue("main_window/maximized", int(self.isMaximized()))
settings.setValue("main_window/locked", int(self.workspace_locked))
settings.remove("docks")
for dock in self.docks:
dock.save()
asset_cache.save()
def on_dock_destroyed(self):
for i, dock in enumerate(self.docks):
try:
a = dock.objectName()
except:
del(self.docks[i])
def focus(self, objects):
for d in self.docks:
if d.class_ in ["preview", "detail", "scheduler"] and objects:
d.main_widget.focus(objects)
def focus_rundown(self, id_channel, date, event=False):
dock = self.create_dock("rundown", state={}, show=True, one_instance=True)
dock.main_widget.load(id_channel, date, event)
def on_search(self):
for d in self.docks:
if d.class_ == "browser":
d.main_widget.search_box.setFocus()
d.main_widget.search_box.selectAll()
def on_now(self):
dock = self.create_dock("rundown", state={}, show=True, one_instance=True)
dock.main_widget.on_now()
#
# Menu actions
#
# FILE
def on_new_asset(self):
dock = self.create_dock("detail", state={}, show=True, one_instance=True)
dock.main_widget.new_asset()
def on_clone_asset(self):
dock = self.create_dock("detail", state={}, show=True, one_instance=True)
dock.main_widget.clone_asset()
def on_dlg_system(self):
self.sys_dlg = SystemDialog(self)
self.sys_dlg.exec_()
def on_logout(self):
stat, res = query("logout")
self.close()
def on_exit(self):
self.close()
# VIEW
def on_wnd_browser(self):
self.create_dock("browser")
def on_wnd_preview(self):
self.create_dock("preview", one_instance=True)
def on_wnd_scheduler(self):
self.create_dock("scheduler", one_instance=True)
def on_wnd_rundown(self):
self.create_dock("rundown", one_instance=True)
def on_lock_workspace(self):
if self.workspace_locked:
self.unlock_workspace()
else:
self.lock_workspace()
def on_refresh(self):
for dock in self.docks:
dock.main_widget.refresh()
def on_change_channel(self, id_channel):
self.id_channel = id_channel
for action in self.menu_channel.actions():
if action.id_channel == id_channel:
action.setChecked(True)
for d in self.docks:
if d.class_ in ["rundown", "scheduler"]:
d.main_widget.set_channel(id_channel)
def on_send_message(self):
msg, ok = QInputDialog.getText(self, "Send message", "Text", QLineEdit.Normal)
if ok and msg:
query("message", message=msg)
#
# Status line
#
def log_handler(self, **kwargs):
message_type = kwargs.get("message_type", INFO)
message = kwargs.get("message", "")
if not message:
return
if message_type == WARNING:
QMessageBox.warning(self, "Warning", message)
        elif message_type == ERROR:
QMessageBox.critical(self, "Error", message)
else:
self.statusBar().showMessage(message, 10000)
#
# Seismic
#
def on_seismic_timer(self):
try:
msg = self.parent.listener.queue.pop(0)
except IndexError:
pass
else:
self.handle_messaging(msg)
def handle_messaging(self, data):
if data.method == "objects_changed" and data.data["object_type"] == "asset":
aids = [aid for aid in data.data["objects"] if aid in asset_cache.keys()]
if aids:
logging.info("{} has been changed by {}".format(asset_cache[aids[0]], data.data.get("user", "anonymous")) )
self.update_assets(aids)
if data.method == "message" and data.data["sender"] != AUTH_KEY:
QMessageBox.information(self, "Message", "Message from {}\n\n{}".format(data.data["from_user"], data.data["message"]))
return
elif data.method == "firefly_shutdown":
logging.warning("Remote shutdown")
sys.exit(0)
for dock in self.docks:
if dock.class_ == "rundown" and data.method in ["playout_status", "job_progress", "objects_changed"]:
pass
elif dock.class_ in ["detail", "scheduler"] and data.method == "objects_changed":
pass
else:
continue # cool construction, isn't it?
dock.main_widget.seismic_handler(data)
for subscriber in self.subscribers:
if data.method in self.subscribers[subscriber]:
subscriber(data)
def subscribe(self, handler, *methods):
"""subscribe dialogs and other (non-dock) windows to seismic"""
self.subscribers[handler] = methods
def unsubscribe(self, handler):
del self.subscribers[handler]
#
# Asset caching
#
def update_assets(self, asset_ids=[]):
# Call this if you want to update asset cache
        res, adata = query("get_assets", handler=self.update_assets_handler, asset_ids=asset_ids)
for dock in self.docks:
self.push_asset_data(dock)
def update_assets_handler(self, data):
        # Handler for asset data coming from the get_assets query
a = Asset(from_data=data)
asset_cache[a.id] = a
def push_asset_data(self, dock):
        # Push asset data to docks that need it
if dock.class_ in ["rundown", "browser"]:
dock.main_widget.model.refresh_assets(asset_cache.keys())
if __name__ == "__main__":
app = Firestarter(Firefly)
app.start()
|
opennx/nx.client
|
firefly.py
|
Python
|
gpl-3.0
| 11,149
|
[
"Firefly"
] |
069178c358fdaada6d52e87b4559fd65cd8f9868e91313b1f00a1b60255063b2
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-job-logging-info
# Author : Stuart Paterson
########################################################################
"""
Retrieve history of transitions for a DIRAC job
Usage:
dirac-wms-job-logging-info [options] ... JobID ...
Arguments:
JobID: DIRAC Job ID
Example:
$ dirac-wms-job-logging-info 1
Status MinorStatus ApplicationStatus DateTime
Received Job accepted Unknown 2011-02-14 10:12:40
Received False Unknown 2011-02-14 11:03:12
Checking JobSanity Unknown 2011-02-14 11:03:12
Checking JobScheduling Unknown 2011-02-14 11:03:12
Waiting Pilot Agent Submission Unknown 2011-02-14 11:03:12
Matched Assigned Unknown 2011-02-14 11:27:17
Matched Job Received by Agent Unknown 2011-02-14 11:27:27
Matched Submitted To CE Unknown 2011-02-14 11:27:38
Running Job Initialization Unknown 2011-02-14 11:27:42
Running Application Unknown 2011-02-14 11:27:48
Completed Application Finished Successfully Unknown 2011-02-14 11:28:01
Completed Uploading Output Sandbox Unknown 2011-02-14 11:28:04
Completed Output Sandbox Uploaded Unknown 2011-02-14 11:28:07
Done Execution Complete Unknown 2011-02-14 11:28:07
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp(exitCode=1)
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
errorList = []
for job in parseArguments(args):
result = dirac.getJobLoggingInfo(job, printOutput=True)
if not result['OK']:
errorList.append((job, result['Message']))
exitCode = 2
for error in errorList:
print("ERROR %s: %s" % error)
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_wms_job_logging_info.py
|
Python
|
gpl-3.0
| 3,016
|
[
"DIRAC"
] |
e60fa84267ecb92a70ebb16145fef02268efa96ec4e09cbc07510db990d4e9bb
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for toonitalia
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "toonitalia"
__category__ = "A"
__type__ = "generic"
__title__ = "Toonitalia"
__language__ = "IT"
host = "http://toonitalia.altervista.org"
headers = [
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate']
]
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("streamondemand.toointalia mainlist")
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Home[/COLOR]",
action="anime",
url=host,
thumbnail="http://i.imgur.com/a8Vwz1V.png"),
Item(channel=__channel__,
title="[COLOR azure]Anime[/COLOR]",
action="anime",
url=host + "/category/anime/",
thumbnail="http://i.imgur.com/a8Vwz1V.png"),
Item(channel=__channel__,
title="[COLOR azure]Anime Sub-Ita[/COLOR]",
action="anime",
url=host + "/category/anime-sub-ita/",
thumbnail="http://i.imgur.com/a8Vwz1V.png"),
Item(channel=__channel__,
title="[COLOR azure]Film Animazione[/COLOR]",
action="animazione",
url="%s/category/film-animazione/" % host,
thumbnail="http://i.imgur.com/a8Vwz1V.png"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV[/COLOR]",
action="anime",
url=host + "/category/serie-tv/",
thumbnail="http://i.imgur.com/a8Vwz1V.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="anime",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def search(item, texto):
logger.info("[toonitalia.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
return anime(item)
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def anime(item):
logger.info("streamondemand.toointalia peliculas")
itemlist = []
## Descarga la pagina
data = scrapertools.cache_page(item.url)
## Extrae las entradas (carpetas)
patron = '<figure class="post-image left">\s*<a href="([^"]+)"><img src="[^"]*"[^l]+lt="([^"]+)" /></a>\s*</figure>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail = ""
itemlist.append(infoSod(
Item(channel=__channel__,
action="episodi",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
viewmode="movie_with_plot"), tipo='tv'))
# Older Entries
patron = '<link rel="next" href="([^"]+)" />'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=__channel__,
title="[COLOR orange]Post più vecchi...[/COLOR]",
url=next_page,
action="anime",
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def animazione(item):
logger.info("streamondemand.toointalia peliculas")
itemlist = []
## Descarga la pagina
data = scrapertools.cache_page(item.url)
## Extrae las entradas (carpetas)
patron = '<figure class="post-image left">\s*<a href="([^"]+)"><img src="[^"]*"[^l]+lt="([^"]+)" /></a>\s*</figure>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail = ""
itemlist.append(infoSod(
Item(channel=__channel__,
action="film",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
viewmode="movie_with_plot"), tipo='movie'))
# Older Entries
patron = '<link rel="next" href="([^"]+)" />'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=__channel__,
title="[COLOR orange]Post più vecchi...[/COLOR]",
url=next_page,
action="animazione",
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def episodi(item):
logger.info("toonitalia.py episodi")
itemlist = []
# Downloads page
data = scrapertools.cache_page(item.url)
# Extracts the entries
patron = '<a\s*href="([^"]+)"\s*target="_blank">([^<]+)</a><'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if 'adf.ly' not in scrapedurl:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="findvid",
title=scrapedtitle,
thumbnail=item.thumbnail,
url=scrapedurl))
return itemlist
def film(item):
logger.info("toonitalia.py film")
itemlist = []
# Downloads page
data = scrapertools.cache_page(item.url)
# Extracts the entries
# patron = '<img class="aligncenter.*?src="([^"]+)" alt="([^"]+)".*?<strong><a href="([^"]+)" target="_blank">'
patron = '<img.*?src="([^"]+)".*?alt="([^"]+)".*?strong><a href="([^"]+)" target="_blank">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="findvid",
title=scrapedtitle,
thumbnail=scrapedthumbnail,
url=scrapedurl))
# Older Entries
patron = '<link rel="next" href="([^"]+)" />'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=__channel__,
title="[COLOR orange]Post più vecchi...[/COLOR]",
url=next_page,
action="film",
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def findvid(item):
logger.info("[toonitalia.py] findvideos")
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
return itemlist
|
costadorione/purestream
|
channels/toonitalia.py
|
Python
|
gpl-3.0
| 7,937
|
[
"ADF"
] |
08f1a4f944f2372c51651015cc20fa7abc95c786e1e4b50e1a890e2145062508
|
# import libraries
import matplotlib
matplotlib.use('Agg')
import mdtraj as md
import matplotlib.pyplot as plt
import numpy as np
from msmbuilder import dataset
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
#Load trajectory with ensembler models
t_models = md.load("../ensembler-models/traj-refine_implicit_md.xtc", top = "../ensembler-models/topol-renumbered-implicit.pdb")
#define 'difference' as hydrogen bond distance
k295e310 = md.compute_contacts(t_models, [[28,43]])
e310r409 = md.compute_contacts(t_models, [[43,142]])
difference = e310r409[0] - k295e310[0]
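# md.compute_contacts returns (distances, residue_pairs); indexing [0]
# selects the per-frame distance array, so 'difference' is
# d(E310-R409) - d(K295-E310) for each model.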
#define 'rmsd' as RMSD of activation loop from 2SRC structure
SRC2 = md.load("../reference-structures/SRC_2SRC_A.pdb")
Activation_Loop_SRC2 = [atom.index for atom in SRC2.topology.atoms if (138 <= atom.residue.index <= 158)]
Activation_Loop_Src = [atom.index for atom in t_models.topology.atoms if (138 <= atom.residue.index <= 158)]
SRC2 = SRC2.atom_slice(Activation_Loop_SRC2)  # atom_slice returns a new trajectory
t_models = t_models.atom_slice(Activation_Loop_Src)
difference = difference[:,0]
rmsd = md.rmsd(t_models,SRC2,frame=0)
#plot
#plt.plot(rmsd, difference, 'o', markersize=5, label="ensembler models", color='black')
sns.kdeplot(rmsd,difference,shade=True,log=True)
plt.xlabel('RMSD Activation Loop (nm)')
plt.ylabel('d(E310-R409) - d(K295-E310) (nm)')
plt.ylim(-2,2)
plt.xlim(0.3,1.0)
plt.savefig('plot_conf_src_ensembler_density.png')
|
choderalab/MSMs
|
plots/plotting_conformations_ensembler_models.py
|
Python
|
gpl-2.0
| 1,404
|
[
"MDTraj"
] |
aeb15087d27cc1c6c3438a9cc4ffe637c73da091646c5212fafa60e8d174e542
|
"""
Unit tests for enrollment methods in views.py
"""
from mock import patch
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory, AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from instructor.views.legacy import get_and_clean_student_list, send_mail_to_student
from django.core import mail
USER_COUNT = 4
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorEnrollsStudent(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check Enrollment/Unenrollment with/without auto-enrollment on activation and with/without email notification
"""
def setUp(self):
instructor = AdminFactory.create()
self.client.login(username=instructor.username, password='test')
self.course = CourseFactory.create()
self.users = [
UserFactory.create(username="student%d" % i, email="student%d@test.com" % i)
for i in xrange(USER_COUNT)
]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
# Empty the test outbox
mail.outbox = []
def test_unenrollment_email_off(self):
"""
Do un-enrollment email off test
"""
course = self.course
# Run the Un-enroll students command
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
response = self.client.post(
url,
{
'action': 'Unenroll multiple students',
'multiple_students': 'student0@test.com student1@test.com'
}
)
# Check the page output
self.assertContains(response, '<td>student0@test.com</td>')
self.assertContains(response, '<td>student1@test.com</td>')
self.assertContains(response, '<td>un-enrolled</td>')
# Check the enrollment table
user = User.objects.get(email='student0@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
user = User.objects.get(email='student1@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_enrollment_new_student_autoenroll_on_email_off(self):
"""
Do auto-enroll on, email off test
"""
course = self.course
# Run the Enroll students command
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student1_1@test.com, student1_2@test.com', 'auto_enroll': 'on'})
# Check the page output
self.assertContains(response, '<td>student1_1@test.com</td>')
self.assertContains(response, '<td>student1_2@test.com</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment on</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
# Check the enrollmentallowed db entries
cea = CourseEnrollmentAllowed.objects.filter(email='student1_1@test.com', course_id=course.id)
self.assertEqual(1, cea[0].auto_enroll)
cea = CourseEnrollmentAllowed.objects.filter(email='student1_2@test.com', course_id=course.id)
self.assertEqual(1, cea[0].auto_enroll)
# Check there is no enrollment db entry other than for the other students
ce = CourseEnrollment.objects.filter(course_id=course.id, is_active=1)
self.assertEqual(4, len(ce))
# Create and activate student accounts with same email
self.student1 = 'student1_1@test.com'
self.password = 'bar'
self.create_account('s1_1', self.student1, self.password)
self.activate_user(self.student1)
self.student2 = 'student1_2@test.com'
self.create_account('s1_2', self.student2, self.password)
self.activate_user(self.student2)
# Check students are enrolled
user = User.objects.get(email='student1_1@test.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, course.id))
user = User.objects.get(email='student1_2@test.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, course.id))
def test_repeat_enroll(self):
"""
Try to enroll an already enrolled student
"""
course = self.course
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student0@test.com', 'auto_enroll': 'on'})
self.assertContains(response, '<td>student0@test.com</td>')
self.assertContains(response, '<td>already enrolled</td>')
    def test_enrollment_new_student_autoenroll_off_email_off(self):
"""
Do auto-enroll off, email off test
"""
course = self.course
# Run the Enroll students command
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student2_1@test.com, student2_2@test.com'})
# Check the page output
self.assertContains(response, '<td>student2_1@test.com</td>')
self.assertContains(response, '<td>student2_2@test.com</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment off</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
# Check the enrollmentallowed db entries
cea = CourseEnrollmentAllowed.objects.filter(email='student2_1@test.com', course_id=course.id)
self.assertEqual(0, cea[0].auto_enroll)
cea = CourseEnrollmentAllowed.objects.filter(email='student2_2@test.com', course_id=course.id)
self.assertEqual(0, cea[0].auto_enroll)
# Check there is no enrollment db entry other than for the setup instructor and students
ce = CourseEnrollment.objects.filter(course_id=course.id, is_active=1)
self.assertEqual(4, len(ce))
# Create and activate student accounts with same email
self.student = 'student2_1@test.com'
self.password = 'bar'
self.create_account('s2_1', self.student, self.password)
self.activate_user(self.student)
self.student = 'student2_2@test.com'
self.create_account('s2_2', self.student, self.password)
self.activate_user(self.student)
# Check students are not enrolled
user = User.objects.get(email='student2_1@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
user = User.objects.get(email='student2_2@test.com')
self.assertFalse(CourseEnrollment.is_enrolled(user, course.id))
def test_get_and_clean_student_list(self):
"""
Clean user input test
"""
string = "abc@test.com, def@test.com ghi@test.com \n \n jkl@test.com \n mno@test.com "
cleaned_string, cleaned_string_lc = get_and_clean_student_list(string)
self.assertEqual(cleaned_string, ['abc@test.com', 'def@test.com', 'ghi@test.com', 'jkl@test.com', 'mno@test.com'])
def test_enrollment_email_on(self):
"""
Do email on enroll test
"""
course = self.course
# Create activated, but not enrolled, user
UserFactory.create(username="student3_0", email="student3_0@test.com", first_name='Autoenrolled')
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student3_0@test.com, student3_1@test.com, student3_2@test.com', 'auto_enroll': 'on', 'email_students': 'on'})
# Check the page output
self.assertContains(response, '<td>student3_0@test.com</td>')
self.assertContains(response, '<td>student3_1@test.com</td>')
self.assertContains(response, '<td>student3_2@test.com</td>')
self.assertContains(response, '<td>added, email sent</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment on, email sent</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(
mail.outbox[0].subject,
'You have been enrolled in Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Autoenrolled Test\n\nYou have been enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"https://edx.org/courses/MITx/999/Robot_Super_Course/\n\n"
"----\nThis email was automatically sent from edx.org to Autoenrolled Test"
)
self.assertEqual(
mail.outbox[1].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[1].body,
"Dear student,\n\nYou have been invited to join "
"Robot Super Course at edx.org by a member of the "
"course staff.\n\n"
"To finish your registration, please visit "
"https://edx.org/register and fill out the registration form "
"making sure to use student3_1@test.com in the E-mail field.\n"
"Once you have registered and activated your account, you will "
"see Robot Super Course listed on your dashboard.\n\n"
"----\nThis email was automatically sent from edx.org to "
"student3_1@test.com"
)
def test_unenrollment_email_on(self):
"""
Do email on unenroll test
"""
course = self.course
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='student4_0@test.com', course_id=course.id)
cea.save()
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
response = self.client.post(url, {'action': 'Unenroll multiple students', 'multiple_students': 'student4_0@test.com, student2@test.com, student3@test.com', 'email_students': 'on'})
# Check the page output
self.assertContains(response, '<td>student2@test.com</td>')
self.assertContains(response, '<td>student3@test.com</td>')
self.assertContains(response, '<td>un-enrolled, email sent</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course "
"Robot Super Course by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n"
"----\nThis email was automatically sent from edx.org "
"to student4_0@test.com"
)
self.assertEqual(
mail.outbox[1].subject,
'You have been un-enrolled from Robot Super Course'
)
def test_send_mail_to_student(self):
"""
Do invalid mail template test
"""
d = {'message': 'message_type_that_doesn\'t_exist'}
send_mail_ret = send_mail_to_student('student0@test.com', d)
self.assertFalse(send_mail_ret)
@patch('instructor.views.legacy.uses_shib')
def test_enrollment_email_on_shib_on(self, mock_uses_shib):
# Do email on enroll, shibboleth on test
course = self.course
mock_uses_shib.return_value = True
# Create activated, but not enrolled, user
UserFactory.create(username="student5_0", email="student5_0@test.com", first_name="ShibTest", last_name="Enrolled")
url = reverse('instructor_dashboard_legacy', kwargs={'course_id': course.id})
response = self.client.post(url, {'action': 'Enroll multiple students', 'multiple_students': 'student5_0@test.com, student5_1@test.com', 'auto_enroll': 'on', 'email_students': 'on'})
# Check the page output
self.assertContains(response, '<td>student5_0@test.com</td>')
self.assertContains(response, '<td>student5_1@test.com</td>')
self.assertContains(response, '<td>added, email sent</td>')
self.assertContains(response, '<td>user does not exist, enrollment allowed, pending with auto enrollment on, email sent</td>')
# Check the outbox
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(
mail.outbox[0].subject,
'You have been enrolled in Robot Super Course'
)
self.assertEqual(
mail.outbox[0].body,
"Dear ShibTest Enrolled\n\nYou have been enrolled in Robot Super Course "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"https://edx.org/courses/MITx/999/Robot_Super_Course/\n\n"
"----\nThis email was automatically sent from edx.org to ShibTest Enrolled"
)
self.assertEqual(
mail.outbox[1].subject,
'You have been invited to register for Robot Super Course'
)
self.assertEqual(
mail.outbox[1].body,
"Dear student,\n\nYou have been invited to join "
"Robot Super Course at edx.org by a member of the "
"course staff.\n\n"
"To access the course visit https://edx.org/courses/MITx/999/Robot_Super_Course/ and login.\n\n"
"----\nThis email was automatically sent from edx.org to "
"student5_1@test.com"
)
|
hkawasaki/kawasaki-aio8-1
|
lms/djangoapps/instructor/tests/test_legacy_enrollment.py
|
Python
|
agpl-3.0
| 14,496
|
[
"VisIt"
] |
a5b815b945c2afdcd15066b9a5e66c4fd1bbc4c2e9e7f42ad7a20c70dbf19e06
|
#
# Copyright (C) 2017-2021 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import espressomd
import numpy as np
import itertools
class StructureFactorTest(ut.TestCase):
'''
Test structure factor analysis against rectangular lattices.
We do not check the wavevectors directly, but rather the
corresponding SF order, which is more readable (integer value).
'''
box_l = 16
part_ty = 0
sf_order = 16
system = espressomd.System(box_l=[box_l, box_l, box_l])
def tearDown(self):
self.system.part.clear()
def peak_orders(self, wavevectors):
"""
Square and rescale wavevectors to recover the corresponding
SF order, which is an integer between 1 and ``self.sf_order**2``.
"""
peak_orders = (wavevectors * self.system.box_l[0] / (2 * np.pi))**2
peak_orders_int = np.around(peak_orders).astype(int)
np.testing.assert_array_almost_equal(peak_orders, peak_orders_int)
return peak_orders_int
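# A worked example of the rescaling above (numbers are assumed, not taken
# from the test data): with box_l = 16, the (1, 1, 0) reflection has
# |q| = 2*pi*sqrt(2)/16, so (|q| * 16 / (2*pi))**2 = 2, i.e. SF order 2
# (= 1**2 + 1**2 + 0**2).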
def generate_peaks(self, a, b, c, conditions):
'''
Generate the main diffraction peaks for crystal structures.
Parameters
----------
a: :obj:`float`
Length of the unit cell on the x-axis.
b: :obj:`float`
Length of the unit cell on the y-axis.
c: :obj:`float`
Length of the unit cell on the z-axis.
conditions: :obj:`function`
Reflection conditions for the crystal lattice.
'''
hkl_ranges = [
range(0, self.sf_order + 1),
range(-self.sf_order, self.sf_order + 1),
range(-self.sf_order, self.sf_order + 1),
]
reflections = [np.linalg.norm([h / a, k / b, l / c]) * 2 * np.pi
for (h, k, l) in itertools.product(*hkl_ranges)
if conditions(h, k, l) and (h + k + l != 0) and
(h**2 + k**2 + l**2) <= self.sf_order**2]
return self.peak_orders(np.unique(reflections))
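# A minimal illustration of the reflection conditions consumed above: for
# a bcc lattice only (h + k + l) even survives, e.g. (1, 1, 0) is allowed
# while (1, 0, 0) is extinct, so the lowest bcc peak has SF order 2.
# See test_bcc below for the corresponding condition lambda.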
def test_tetragonal(self):
"""Check tetragonal lattice."""
a = 2
b = 4
c = 8
xen = range(0, self.box_l, a)
yen = range(0, self.box_l, b)
zen = range(0, self.box_l, c)
for i, j, k in itertools.product(xen, yen, zen):
self.system.part.add(type=self.part_ty, pos=(i, j, k))
wavevectors, intensities = self.system.analysis.structure_factor(
sf_types=[self.part_ty], sf_order=self.sf_order)
intensities = np.around(intensities, 8)
# no reflection conditions on (h,k,l)
peaks_ref = self.generate_peaks(a, b, c, lambda h, k, l: True)
peaks = self.peak_orders(wavevectors[np.nonzero(intensities)])
np.testing.assert_array_equal(peaks, peaks_ref[:len(peaks)])
def test_sc(self):
"""Check simple cubic lattice."""
l0 = 4
xen = range(0, self.box_l, l0)
for i, j, k in itertools.product(xen, repeat=3):
self.system.part.add(type=self.part_ty, pos=(i, j, k))
wavevectors, intensities = self.system.analysis.structure_factor(
sf_types=[self.part_ty], sf_order=self.sf_order)
intensities = np.around(intensities, 8)
np.testing.assert_array_equal(
intensities[np.nonzero(intensities)], len(self.system.part))
# no reflection conditions on (h,k,l)
peaks = self.peak_orders(wavevectors[np.nonzero(intensities)])
peaks_ref = self.generate_peaks(l0, l0, l0, lambda h, k, l: True)
np.testing.assert_array_equal(peaks, peaks_ref[:len(peaks)])
def test_bcc(self):
"""Check body-centered cubic lattice."""
l0 = 4
m = l0 / 2
xen = range(0, self.box_l, l0)
for i, j, k in itertools.product(xen, repeat=3):
self.system.part.add(type=self.part_ty, pos=(i, j, k))
self.system.part.add(type=self.part_ty, pos=(i + m, j + m, k + m))
wavevectors, intensities = self.system.analysis.structure_factor(
sf_types=[self.part_ty], sf_order=self.sf_order)
intensities = np.around(intensities, 8)
np.testing.assert_array_equal(
intensities[np.nonzero(intensities)], len(self.system.part))
# reflection conditions
# (h+k+l) even => F = 2f, otherwise F = 0
peaks_ref = self.generate_peaks(
l0, l0, l0, lambda h, k, l: (h + k + l) % 2 == 0)
peaks = self.peak_orders(wavevectors[np.nonzero(intensities)])
np.testing.assert_array_equal(peaks, peaks_ref[:len(peaks)])
def test_fcc(self):
"""Check face-centered cubic lattice."""
l0 = 4
m = l0 / 2
xen = range(0, self.box_l, l0)
for i, j, k in itertools.product(xen, repeat=3):
self.system.part.add(type=self.part_ty, pos=(i, j, k))
self.system.part.add(type=self.part_ty, pos=(i + m, j + m, k))
self.system.part.add(type=self.part_ty, pos=(i + m, j, k + m))
self.system.part.add(type=self.part_ty, pos=(i, j + m, k + m))
wavevectors, intensities = self.system.analysis.structure_factor(
sf_types=[self.part_ty], sf_order=self.sf_order)
intensities = np.around(intensities, 8)
np.testing.assert_array_equal(
intensities[np.nonzero(intensities)], len(self.system.part))
# reflection conditions
# (h,k,l) all even or odd => F = 4f, otherwise F = 0
peaks_ref = self.generate_peaks(
l0, l0, l0, lambda h, k, l:
h % 2 == 0 and k % 2 == 0 and l % 2 == 0 or
h % 2 == 1 and k % 2 == 1 and l % 2 == 1)
peaks = self.peak_orders(wavevectors[np.nonzero(intensities)])
np.testing.assert_array_equal(peaks, peaks_ref[:len(peaks)])
def test_cco(self):
"""Check c-centered orthorhombic lattice."""
l0 = 4
m = l0 / 2
xen = range(0, self.box_l, l0)
for i, j, k in itertools.product(xen, repeat=3):
self.system.part.add(type=self.part_ty, pos=(i, j, k))
self.system.part.add(type=self.part_ty, pos=(i + m, j + m, k))
wavevectors, intensities = self.system.analysis.structure_factor(
sf_types=[self.part_ty], sf_order=self.sf_order)
intensities = np.around(intensities, 8)
# reflection conditions
# (h+k) even => F = 2f, otherwise F = 0
peaks_ref = self.generate_peaks(
l0, l0, l0, lambda h, k, l: (h + k) % 2 == 0)
peaks = self.peak_orders(wavevectors[np.nonzero(intensities)])
np.testing.assert_array_equal(peaks, peaks_ref[:len(peaks)])
def test_exceptions(self):
with self.assertRaisesRegex(ValueError, 'order has to be a strictly positive number'):
self.system.analysis.structure_factor(sf_types=[0], sf_order=0)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/sf_simple_lattice.py
|
Python
|
gpl-3.0
| 7,556
|
[
"CRYSTAL",
"ESPResSo"
] |
29255b416b5039db8927bc610a6242d34f07ec714101dff29895153f2a8b3590
|
#!/usr/bin/python
"""
US Federal holidays.
Copyright (c) 2015 Kauinoa
License: MIT
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from datetime import date
from . import util
from . import christian_holidays
def get_new_years_day(year):
"""Get New Year's Day (January 1st)."""
return date(year, 1, 1)
def get_martin_luther_king_day(year):
"""
Get Martin Luther King Day.
3rd Monday in January.
"""
return util.get_date_in_month(year, util.JAN, util.MON, 3)
def get_presidents_day(year):
"""
Get Presidents' Day.
3rd Monday in February.
"""
return util.get_date_in_month(year, util.FEB, util.MON, 3)
def get_memorial_day(year):
"""Get Memorial Day (Last Monday in May)."""
return util.last_day_in_month(year, util.MAY, util.MON)
def get_independence_day_observed(year):
"""
Get Independence Day Observed.
If the 4th of July falls on a Saturday, it is observed on Friday the 3rd.
If the 4th of July falls on a Sunday, it is observed on Monday the 5th.
"""
if date(year, util.JUL, 4).weekday() == util.SAT:
return date(year, util.JUL, 3)
elif date(year, util.JUL, 4).weekday() == util.SUN:
return date(year, util.JUL, 5)
else:
return None
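# Worked example: July 4th, 2015 fell on a Saturday, so
# get_independence_day_observed(2015) returns date(2015, 7, 3); in 2021 it
# fell on a Sunday, giving date(2021, 7, 5). In years where it falls on a
# weekday the function returns None.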
def get_independence_day(year):
"""Get Independence Day (July 4th)."""
return date(year, util.JUL, 4)
def get_labor_day(year):
"""
Get Labor day.
1st Monday of September.
"""
return util.get_date_in_month(year, util.SEP, util.MON, 1)
def get_columbus_day(year):
"""
Get Columbus Day.
2nd Monday of October.
"""
return util.get_date_in_month(year, util.OCT, util.MON, 2)
def get_veterans_day(year):
"""Get Veteran's Day (November 11th)."""
return date(year, util.NOV, 11)
def get_thanksgiving_day(year):
"""
Get Thanksgiving day.
4th Thursday of November.
"""
return util.get_date_in_month(year, util.NOV, util.THU, 4)
def get_christmas_day(year):
"""Get Christmas (December 25th)."""
return christian_holidays.get_christmas_day(year)
holidays = {
"New Year's Day": get_new_years_day,
"Martin Luther King Day": get_martin_luther_king_day,
"Presidents' Day": get_presidents_day,
"Memorial Day": get_memorial_day,
"Independence Day (Observed)": get_independence_day_observed,
"Independence Day": get_independence_day,
"Labor Day": get_labor_day,
"Columbus Day": get_columbus_day,
"Veteran's Day": get_veterans_day,
"Thanksgiving": get_thanksgiving_day,
"Christmas Day": get_christmas_day
}
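# A minimal usage sketch (this module uses relative imports, so it must be
# imported as part of its package; the import path below is an assumption):
#
# from calendarevents.federal_holidays import holidays
# observed = {name: fn(2021) for name, fn in holidays.items()}
# # "Independence Day (Observed)" maps to None in years where July 4th
# # falls on a weekday, so filter out None values before use.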
|
kauinoa/CalendarEvents
|
calendarevents/federal_holidays.py
|
Python
|
mit
| 3,668
|
[
"COLUMBUS"
] |
542b86de1b00f3951e4560a70d820f1eff7178b57f45ccdf67acb0e5414ffe06
|
from __future__ import print_function
import os
import numpy as np
import subprocess
# flopy imports
from ..modflow.mfdisu import ModflowDisU
from .util_array import read1d, Util2d
from ..mbase import which
try:
import shapefile
except ImportError:
raise Exception('Error importing shapefile: ' +
'try pip install pyshp')
# todo
# creation of line and polygon shapefiles from features (holes!)
# program layer functionality for plot method
# support an asciigrid option for top and bottom interpolation
# add intersection capability
def features_to_shapefile(features, featuretype, filename):
"""
Write a shapefile for the features of type featuretype.
Parameters
----------
features : list
List of point, line, or polygon features
featuretype : str
Must be 'point', 'line', or 'polygon'
filename : string
name of the shapefile to write
Returns
-------
None
"""
if featuretype.lower() not in ['point', 'line', 'polygon']:
raise Exception('Unrecognized feature type: {}'.format(featuretype))
if featuretype.lower() == 'line':
wr = shapefile.Writer(shapeType=shapefile.POLYLINE)
wr.field("SHAPEID", "N", 20, 0)
for i, line in enumerate(features):
wr.line(line)
wr.record(i)
elif featuretype.lower() == 'point':
wr = shapefile.Writer(shapeType=shapefile.POINT)
wr.field("SHAPEID", "N", 20, 0)
for i, point in enumerate(features):
wr.point(point[0], point[1])
wr.record(i)
elif featuretype.lower() == 'polygon':
wr = shapefile.Writer(shapeType=shapefile.POLYGON)
wr.field("SHAPEID", "N", 20, 0)
for i, polygon in enumerate(features):
wr.poly(polygon)
wr.record(i)
wr.save(filename)
return
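# A minimal sketch of the feature layout expected above (coordinates are
# assumptions): points are (x, y) pairs, lines and polygons are lists of
# parts, each part a list of (x, y) pairs.
#
# features_to_shapefile([(0.0, 0.0), (10.0, 5.0)], 'point', 'my_points')
# ring = [(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]
# features_to_shapefile([[ring]], 'polygon', 'my_square')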
def ndarray_to_asciigrid(fname, a, extent, nodata=1.e30):
# extent info
xmin, xmax, ymin, ymax = extent
nrow, ncol = a.shape
dx = (xmax - xmin) / ncol
assert np.isclose(dx, (ymax - ymin) / nrow)
# header
header = 'ncols {}\n'.format(ncol)
header += 'nrows {}\n'.format(nrow)
header += 'xllcorner {}\n'.format(xmin)
header += 'yllcorner {}\n'.format(ymin)
header += 'cellsize {}\n'.format(dx)
header += 'NODATA_value {}\n'.format(float(nodata))
# replace nan with nodata
idx = np.isnan(a)
a[idx] = float(nodata)
# write
with open(fname, 'w') as f:
f.write(header)
np.savetxt(f, a, fmt='%15.6e')
return
class Gridgen(object):
"""
Class to work with the gridgen program to create layered quadtree grids.
Parameters
----------
dis : flopy.modflow.ModflowDis
Flopy discretization object
model_ws : str
workspace location for creating gridgen files (default is '.')
exe_name : str
path and name of the gridgen program. (default is gridgen)
surface_interpolation : str
Default gridgen method for interpolating elevations. Valid options
include 'replicate' (default) and 'interpolate'
Notes
-----
For the surface elevations, the top of a layer uses the same surface as
the bottom of the overlying layer.
"""
def __init__(self, dis, model_ws='.', exe_name='gridgen',
surface_interpolation='replicate'):
self.nodes = 0
self.nja = 0
self._vertdict = {}
self.dis = dis
self.model_ws = model_ws
exe_name = which(exe_name)
if exe_name is None:
raise Exception('Cannot find gridgen binary executable')
self.exe_name = os.path.abspath(exe_name)
# Set default surface interpolation for all surfaces (nlay + 1)
surface_interpolation = surface_interpolation.upper()
if surface_interpolation not in ['INTERPOLATE', 'REPLICATE']:
raise Exception('Error. Unknown surface interpolation method: '
'{}. Must be INTERPOLATE or '
'REPLICATE'.format(surface_interpolation))
self.surface_interpolation = [surface_interpolation
for k in range(dis.nlay + 1)]
# Set up a blank _active_domain list with None for each layer
self._addict = {}
self._active_domain = []
for k in range(dis.nlay):
self._active_domain.append(None)
# Set up a blank _refinement_features list with empty list for
# each layer
self._rfdict = {}
self._refinement_features = []
for k in range(dis.nlay):
self._refinement_features.append([])
# Set up blank _elev and _elev_extent dictionaries
self._asciigrid_dict = {}
return
def set_surface_interpolation(self, isurf, type, elev=None,
elev_extent=None):
"""
Parameters
----------
isurf : int
surface number where 0 is top and nlay + 1 is bottom
type : str
Must be 'INTERPOLATE', 'REPLICATE' or 'ASCIIGRID'.
elev : numpy.ndarray of shape (nr, nc) or str
Array that is used as an asciigrid. If elev is a string, then
it is assumed to be the name of the asciigrid.
elev_extent : list-like
list of xmin, xmax, ymin, ymax extents of the elev grid.
Returns
-------
None
"""
assert 0 <= isurf <= self.dis.nlay + 1
type = type.upper()
if type not in ['INTERPOLATE', 'REPLICATE', 'ASCIIGRID']:
raise Exception('Error. Unknown surface interpolation type: '
'{}. Must be INTERPOLATE, REPLICATE or '
'ASCIIGRID'.format(type))
else:
self.surface_interpolation[isurf] = type
if type == 'ASCIIGRID':
if isinstance(elev, np.ndarray):
if elev_extent is None:
raise Exception('Error. ASCIIGRID was specified but '
'elev_extent was not.')
try:
xmin, xmax, ymin, ymax = elev_extent
except:
raise Exception('Cannot cast elev_extent into xmin, xmax, '
'ymin, ymax: {}'.format(elev_extent))
nm = '_gridgen.lay{}.asc'.format(isurf)
fname = os.path.join(self.model_ws, nm)
ndarray_to_asciigrid(fname, elev, elev_extent)
self._asciigrid_dict[isurf] = nm
elif isinstance(elev, str):
if not os.path.isfile(elev):
raise Exception('Error. elev is not a valid file: '
'{}'.format(elev))
self._asciigrid_dict[isurf] = elev
else:
raise Exception('Error. ASCIIGRID was specified but '
'elev was not specified as a numpy ndarray '
'or a valid asciigrid file.')
return
def add_active_domain(self, feature, layers):
"""
Parameters
----------
feature : str or list
feature can be either a string containing the name of a polygon
shapefile or it can be a list of polygons
layers : list
A list of layers (zero based) for which this active domain
applies.
Returns
-------
None
"""
# set nodes and nja to 0 to indicate that grid must be rebuilt
self.nodes = 0
self.nja = 0
# Create shapefile or set shapefile to feature
adname = 'ad{}'.format(len(self._addict))
if isinstance(feature, list):
# Create a shapefile
adname_w_path = os.path.join(self.model_ws, adname)
features_to_shapefile(feature, 'polygon', adname_w_path)
shapefile = adname
else:
shapefile = feature
self._addict[adname] = shapefile
sn = os.path.join(self.model_ws, shapefile + '.shp')
assert os.path.isfile(sn), 'Shapefile does not exist: {}'.format(sn)
for k in layers:
self._active_domain[k] = adname
return
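# A minimal usage sketch (assumed instance ``g`` and ring coordinates):
# restrict layer 0 to a rectangular domain.
#
# ring = [(0, 0), (0, 100), (100, 100), (100, 0), (0, 0)]
# g.add_active_domain([[ring]], layers=[0])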
def add_refinement_features(self, features, featuretype, level, layers):
"""
Parameters
----------
features : str or list
features can be either a string containing the name of a shapefile
or it can be a list of points, lines, or polygons
featuretype : str
Must be either 'point', 'line', or 'polygon'
level : int
The level of refinement for these features
layers : list
A list of layers (zero based) to which these refinement
features apply.
Returns
-------
None
"""
# set nodes and nja to 0 to indicate that grid must be rebuilt
self.nodes = 0
self.nja = 0
# Create shapefile or set shapefile to feature
rfname = 'rf{}'.format(len(self._rfdict))
if isinstance(features, list):
rfname_w_path = os.path.join(self.model_ws, rfname)
features_to_shapefile(features, featuretype, rfname_w_path)
shapefile = rfname
else:
shapefile = features
self._rfdict[rfname] = [shapefile, featuretype, level]
sn = os.path.join(self.model_ws, shapefile + '.shp')
assert os.path.isfile(sn), 'Shapefile does not exist: {}'.format(sn)
for k in layers:
self._refinement_features[k].append(rfname)
return
def build(self, verbose=False):
"""
Build the quadtree grid
Parameters
----------
verbose : bool
If true, print the results of the gridgen command to the terminal
(default is False)
Returns
-------
None
"""
fname = os.path.join(self.model_ws, '_gridgen_build.dfn')
f = open(fname, 'w')
# Write the basegrid information
f.write(self._mfgrid_block())
f.write(2 * '\n')
# Write the quadtree builder block
f.write(self._builder_block())
f.write(2 * '\n')
# Write the active domain blocks
f.write(self._ad_blocks())
f.write(2 * '\n')
# Write the refinement features
f.write(self._rf_blocks())
f.write(2 * '\n')
f.close()
# Command: gridgen quadtreebuilder _gridgen_build.dfn
qtgfname = os.path.join(self.model_ws, 'quadtreegrid.dfn')
if os.path.isfile(qtgfname):
os.remove(qtgfname)
cmds = [self.exe_name, 'quadtreebuilder', '_gridgen_build.dfn']
buff = subprocess.check_output(cmds, cwd=self.model_ws)
if verbose:
print(buff)
assert os.path.isfile(qtgfname)
# Export the grid to shapefiles, usgdata, and vtk files
self.export(verbose)
# Create a dictionary that relates nodenumber to vertices
self._mkvertdict()
return
def get_vertices(self, nodenumber):
"""
Return a list of 5 vertices for the cell. The first vertex should
be the same as the last vertex.
Parameters
----------
nodenumber
Returns
-------
list of vertices : list
"""
return self._vertdict[nodenumber]
def get_center(self, nodenumber):
"""
Return the cell center x and y coordinates
Parameters
----------
nodenumber
Returns
-------
(x, y) : tuple
"""
vts = self.get_vertices(nodenumber)
xmin = vts[0][0]
xmax = vts[1][0]
ymin = vts[2][1]
ymax = vts[0][1]
return ((xmin + xmax) * 0.5, (ymin + ymax) * 0.5)
def export(self, verbose=False):
"""
Export the quadtree grid to shapefiles, usgdata, and vtk
Returns
-------
None
"""
# Create the export definition file
fname = os.path.join(self.model_ws, '_gridgen_export.dfn')
f = open(fname, 'w')
f.write('LOAD quadtreegrid.dfn\n')
f.write('\n')
f.write(self._grid_export_blocks())
f.close()
assert os.path.isfile(fname), \
'Could not create export dfn file: {}'.format(fname)
# Export shapefiles
cmds = [self.exe_name, 'grid_to_shapefile_poly', '_gridgen_export.dfn']
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
if verbose:
print(buff)
fn = os.path.join(self.model_ws, 'qtgrid.shp')
assert os.path.isfile(fn)
except:
print('Error. Failed to export polygon shapefile of grid', buff)
cmds = [self.exe_name, 'grid_to_shapefile_point',
'_gridgen_export.dfn']
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
if verbose:
print(buff)
fn = os.path.join(self.model_ws, 'qtgrid_pt.shp')
assert os.path.isfile(fn)
except:
print('Error. Failed to export point shapefile of grid', buff)
# Export the usg data
cmds = [self.exe_name, 'grid_to_usgdata', '_gridgen_export.dfn']
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
if verbose:
print(buff)
fn = os.path.join(self.model_ws, 'qtg.nod')
assert os.path.isfile(fn)
except:
print('Error. Failed to export usgdata', buff)
# Export vtk
cmds = [self.exe_name, 'grid_to_vtk', '_gridgen_export.dfn']
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
if verbose:
print(buff)
fn = os.path.join(self.model_ws, 'qtg.vtu')
assert os.path.isfile(fn)
except:
print('Error. Failed to export vtk file', buff)
cmds = [self.exe_name, 'grid_to_vtk_sv', '_gridgen_export.dfn']
buff = []
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
if verbose:
print(buff)
fn = os.path.join(self.model_ws, 'qtg_sv.vtu')
assert os.path.isfile(fn)
except:
print('Error. Failed to export shared vertex vtk file', buff)
return
def plot(self, ax=None, layer=0, edgecolor='k', facecolor='none',
cmap='Dark2', a=None, masked_values=None, **kwargs):
"""
Plot the grid. This method will plot the grid using the shapefile
that was created as part of the build method.
Note that the layer option is not working yet.
Parameters
----------
ax : matplotlib.pyplot axis
The plot axis. If not provided, plt.gca() will be used.
If there is not a current axis then a new one will be created.
layer : int
Not working! This should show only this layer, but there is no
way to do this yet with plot_shapefile.
cmap : string
Name of colormap to use for polygon shading (default is 'Dark2')
edgecolor : string
Color name (default is 'k').
facecolor : string
Color name (default is 'none').
a : numpy.ndarray
Array to plot.
masked_values : iterable of floats, ints
Values to mask.
kwargs : dictionary
Keyword arguments that are passed to
PatchCollection.set(``**kwargs``). Some common kwargs would be
'linewidths', 'linestyles', 'alpha', etc.
Returns
-------
pc : matplotlib.collections.PatchCollection
"""
import matplotlib.pyplot as plt
from flopy.plot import plot_shapefile, shapefile_extents
if ax is None:
ax = plt.gca()
shapename = os.path.join(self.model_ws, 'qtgrid')
xmin, xmax, ymin, ymax = shapefile_extents(shapename)
pc = plot_shapefile(shapename, ax=ax, edgecolor=edgecolor,
facecolor=facecolor, cmap=cmap, a=a,
masked_values=masked_values, **kwargs)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
return pc
def get_nod_recarray(self):
"""
Load the qtg.nod file and return as a numpy recarray
Returns
-------
node_ra : ndarray
Recarray representation of the node file
"""
# nodes, nlay, ivsd, itmuni, lenuni, idsymrd, laycbd
fname = os.path.join(self.model_ws, 'qtg.nod')
dt = np.dtype([('node', int), ('layer', int),
('x', float), ('y', float), ('z', float),
('dx', float), ('dy', float), ('dz', float),
])
node_ra = np.genfromtxt(fname, dtype=dt, skip_header=1)
return node_ra
def get_disu(self, model, nper=1, perlen=1, nstp=1, tsmult=1, steady=True,
itmuni=4, lenuni=2):
# nodes, nlay, ivsd, itmuni, lenuni, idsymrd, laycbd
fname = os.path.join(self.model_ws, 'qtg.nod')
f = open(fname, 'r')
line = f.readline()
ll = line.strip().split()
nodes = int(ll.pop(0))
f.close()
nlay = self.dis.nlay
ivsd = 0
idsymrd = 0
laycbd = 0
# Save nodes
self.nodes = nodes
# nodelay
nodelay = np.empty((nlay), dtype=int)
fname = os.path.join(self.model_ws, 'qtg.nodesperlay.dat')
f = open(fname, 'r')
nodelay = read1d(f, nodelay)
f.close()
# top
top = [0] * nlay
for k in range(nlay):
fname = os.path.join(self.model_ws,
'quadtreegrid.top{}.dat'.format(k + 1))
f = open(fname, 'r')
tpk = np.empty((nodelay[k]), dtype=np.float32)
tpk = read1d(f, tpk)
f.close()
if tpk.min() == tpk.max():
tpk = tpk.min()
else:
tpk = Util2d(model, (1, nodelay[k]), np.float32,
np.reshape(tpk, (1, nodelay[k])),
name='top {}'.format(k + 1))
top[k] = tpk
# bot
bot = [0] * nlay
for k in range(nlay):
fname = os.path.join(self.model_ws,
'quadtreegrid.bot{}.dat'.format(k + 1))
f = open(fname, 'r')
btk = np.empty((nodelay[k]), dtype=np.float32)
btk = read1d(f, btk)
f.close()
if btk.min() == btk.max():
btk = btk.min()
else:
btk = Util2d(model, (1, nodelay[k]), np.float32,
np.reshape(btk, (1, nodelay[k])),
name='bot {}'.format(k + 1))
bot[k] = btk
# area
area = [0] * nlay
fname = os.path.join(self.model_ws, 'qtg.area.dat')
f = open(fname, 'r')
anodes = np.empty((nodes), dtype=np.float32)
anodes = read1d(f, anodes)
f.close()
istart = 0
for k in range(nlay):
istop = istart + nodelay[k]
ark = anodes[istart: istop]
if ark.min() == ark.max():
ark = ark.min()
else:
ark = Util2d(model, (1, nodelay[k]), np.float32,
np.reshape(ark, (1, nodelay[k])),
name='area layer {}'.format(k + 1))
area[k] = ark
istart = istop
# iac
iac = np.empty((nodes), dtype=int)
fname = os.path.join(self.model_ws, 'qtg.iac.dat')
f = open(fname, 'r')
iac = read1d(f, iac)
f.close()
# Calculate njag and save as nja to self
njag = iac.sum()
self.nja = njag
# ja
ja = np.empty((njag), dtype=int)
fname = os.path.join(self.model_ws, 'qtg.ja.dat')
f = open(fname, 'r')
ja = read1d(f, ja)
f.close()
# ivc
ivc = np.empty((njag), dtype=int)
fname = os.path.join(self.model_ws, 'qtg.fldr.dat')
f = open(fname, 'r')
ivc = read1d(f, ivc)
f.close()
cl1 = None
cl2 = None
# cl12
cl12 = np.empty((njag), dtype=np.float32)
fname = os.path.join(self.model_ws, 'qtg.c1.dat')
f = open(fname, 'r')
cl12 = read1d(f, cl12)
f.close()
# fahl
fahl = np.empty((njag), dtype=np.float32)
fname = os.path.join(self.model_ws, 'qtg.fahl.dat')
f = open(fname, 'r')
fahl = read1d(f, fahl)
f.close()
# create dis object instance
disu = ModflowDisU(model, nodes=nodes, nlay=nlay, njag=njag, ivsd=ivsd,
nper=nper, itmuni=itmuni, lenuni=lenuni,
idsymrd=idsymrd, laycbd=laycbd, nodelay=nodelay,
top=top, bot=bot, area=area, iac=iac, ja=ja,
ivc=ivc, cl1=cl1, cl2=cl2, cl12=cl12, fahl=fahl,
perlen=perlen, nstp=nstp, tsmult=tsmult,
steady=steady)
# return dis object instance
return disu
def intersect(self, features, featuretype, layer):
"""
Parameters
----------
features : str or list
features can be either a string containing the name of a shapefile
or it can be a list of points, lines, or polygons
featuretype : str
Must be either 'point', 'line', or 'polygon'
layer : int
Layer (zero based) to intersect with.
Returns
-------
result : np.recarray
Recarray of the intersection properties.
"""
ifname = 'intersect_feature'
if isinstance(features, list):
ifname_w_path = os.path.join(self.model_ws, ifname)
if os.path.exists(ifname_w_path + '.shp'):
os.remove(ifname_w_path + '.shp')
features_to_shapefile(features, featuretype, ifname_w_path)
shapefile = ifname
else:
shapefile = features
sn = os.path.join(self.model_ws, shapefile + '.shp')
assert os.path.isfile(sn), 'Shapefile does not exist: {}'.format(sn)
fname = os.path.join(self.model_ws, '_intersect.dfn')
if os.path.isfile(fname):
os.remove(fname)
f = open(fname, 'w')
f.write('LOAD quadtreegrid.dfn\n')
f.write(1 * '\n')
f.write(self._intersection_block(shapefile, featuretype, layer))
f.close()
# Intersect
cmds = [self.exe_name, 'intersect', '_intersect.dfn']
buff = []
fn = os.path.join(self.model_ws, 'intersection.ifo')
if os.path.isfile(fn):
os.remove(fn)
try:
buff = subprocess.check_output(cmds, cwd=self.model_ws)
except:
print('Error. Failed to perform intersection', buff)
# Make sure new intersection file was created.
if not os.path.isfile(fn):
s = 'Error. Failed to perform intersection: {}'.format(buff)
raise Exception(s)
# Calculate the number of columns to import
# The extra comma causes one too many columns, so calculate the length
f = open(fn, 'r')
line = f.readline()
f.close()
ncol = len(line.strip().split(',')) - 1
# Load the intersection results as a recarray, convert nodenumber
# to zero-based and return
result = np.genfromtxt(fn, dtype=None, names=True, delimiter=',',
usecols=tuple(range(ncol)))
result = result.view(np.recarray)
result['nodenumber'] -= 1
return result
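# A minimal usage sketch (assumed instance ``g`` and coordinates):
# intersect a line with layer 0 and read the zero-based node numbers.
#
# line = [[(0.0, 50.0), (100.0, 50.0)]]
# rec = g.intersect([line], 'line', 0)
# nodes_hit = rec.nodenumber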
def _intersection_block(self, shapefile, featuretype, layer):
s = ''
s += 'BEGIN GRID_INTERSECTION intersect' + '\n'
s += ' GRID = quadtreegrid\n'
s += ' LAYER = {}\n'.format(layer + 1)
s += ' SHAPEFILE = {}\n'.format(shapefile)
s += ' FEATURE_TYPE = {}\n'.format(featuretype)
s += ' OUTPUT_FILE = {}\n'.format('intersection.ifo')
s += 'END GRID_INTERSECTION intersect' + '\n'
return s
def _mfgrid_block(self):
# Need to adjust offsets and rotation because gridgen rotates around
# lower left corner, whereas flopy rotates around upper left.
# gridgen rotation is counterclockwise, whereas flopy rotation is
# clockwise.
sr = self.dis.parent.sr
xll = sr.xul
yll = sr.yul - sr.yedge[0]
xllrot, yllrot = sr.rotate(xll, yll, sr.rotation, xorigin=sr.xul,
yorigin=sr.yul)
s = ''
s += 'BEGIN MODFLOW_GRID basegrid' + '\n'
s += ' ROTATION_ANGLE = {}\n'.format(-sr.rotation)
s += ' X_OFFSET = {}\n'.format(xllrot)
s += ' Y_OFFSET = {}\n'.format(yllrot)
s += ' NLAY = {}\n'.format(self.dis.nlay)
s += ' NROW = {}\n'.format(self.dis.nrow)
s += ' NCOL = {}\n'.format(self.dis.ncol)
# delr
delr = self.dis.delr.array
if delr.min() == delr.max():
s += ' DELR = CONSTANT {}\n'.format(delr.min())
else:
s += ' DELR = OPEN/CLOSE delr.dat\n'
fname = os.path.join(self.model_ws, 'delr.dat')
np.savetxt(fname, delr)
# delc
delc = self.dis.delc.array
if delc.min() == delc.max():
s += ' DELC = CONSTANT {}\n'.format(delc.min())
else:
s += ' DELC = OPEN/CLOSE delc.dat\n'
fname = os.path.join(self.model_ws, 'delc.dat')
np.savetxt(fname, delc)
# top
top = self.dis.top.array
if top.min() == top.max():
s += ' TOP = CONSTANT {}\n'.format(top.min())
else:
s += ' TOP = OPEN/CLOSE top.dat\n'
fname = os.path.join(self.model_ws, 'top.dat')
np.savetxt(fname, top)
# bot
botm = self.dis.botm
for k in range(self.dis.nlay):
bot = botm[k].array
if bot.min() == bot.max():
s += ' BOTTOM LAYER {} = CONSTANT {}\n'.format(k + 1,
bot.min())
else:
s += ' BOTTOM LAYER {0} = OPEN/CLOSE bot{0}.dat\n'.format(k +
1)
fname = os.path.join(self.model_ws, 'bot{}.dat'.format(k + 1))
np.savetxt(fname, bot)
s += 'END MODFLOW_GRID' + '\n'
return s
def _rf_blocks(self):
s = ''
for rfname, rf in self._rfdict.items():
shapefile, featuretype, level = rf
s += 'BEGIN REFINEMENT_FEATURES {}\n'.format(rfname)
s += ' SHAPEFILE = {}\n'.format(shapefile)
s += ' FEATURE_TYPE = {}\n'.format(featuretype)
s += ' REFINEMENT_LEVEL = {}\n'.format(level)
s += 'END REFINEMENT_FEATURES\n'
s += 2 * '\n'
return s
def _ad_blocks(self):
s = ''
for adname, shapefile in self._addict.items():
s += 'BEGIN ACTIVE_DOMAIN {}\n'.format(adname)
s += ' SHAPEFILE = {}\n'.format(shapefile)
s += ' FEATURE_TYPE = {}\n'.format('polygon')
s += ' INCLUDE_BOUNDARY = {}\n'.format('True')
s += 'END ACTIVE_DOMAIN\n'
s += 2 * '\n'
return s
def _builder_block(self):
s = 'BEGIN QUADTREE_BUILDER quadtreebuilder\n'
s += ' MODFLOW_GRID = basegrid\n'
# Write active domain information
for k, adk in enumerate(self._active_domain):
if adk is None:
continue
s += ' ACTIVE_DOMAIN LAYER {} = {}\n'.format(k + 1, adk)
# Write refinement feature information
for k, rfkl in enumerate(self._refinement_features):
if len(rfkl) == 0:
continue
s += ' REFINEMENT_FEATURES LAYER {} = '.format(k + 1)
for rf in rfkl:
s += rf + ' '
s += '\n'
s += ' SMOOTHING = full\n'
for k in range(self.dis.nlay):
if self.surface_interpolation[k] == 'ASCIIGRID':
grd = '_gridgen.lay{}.asc'.format(k)
else:
grd = 'basename'
s += ' TOP LAYER {} = {} {}\n'.format(k + 1,
self.surface_interpolation[k],
grd)
for k in range(self.dis.nlay):
if self.surface_interpolation[k + 1] == 'ASCIIGRID':
grd = '_gridgen.lay{}.asc'.format(k + 1)
else:
grd = 'basename'
s += ' BOTTOM LAYER {} = {} {}\n'.format(k + 1,
self.surface_interpolation[k + 1],
grd)
s += ' GRID_DEFINITION_FILE = quadtreegrid.dfn\n'
s += 'END QUADTREE_BUILDER\n'
return s
def _grid_export_blocks(self):
s = 'BEGIN GRID_TO_SHAPEFILE grid_to_shapefile_poly\n'
s += ' GRID = quadtreegrid\n'
s += ' SHAPEFILE = qtgrid\n'
s += ' FEATURE_TYPE = polygon\n'
s += 'END GRID_TO_SHAPEFILE\n'
s += '\n'
s += 'BEGIN GRID_TO_SHAPEFILE grid_to_shapefile_point\n'
s += ' GRID = quadtreegrid\n'
s += ' SHAPEFILE = qtgrid_pt\n'
s += ' FEATURE_TYPE = point\n'
s += 'END GRID_TO_SHAPEFILE\n'
s += '\n'
s += 'BEGIN GRID_TO_USGDATA grid_to_usgdata\n'
s += ' GRID = quadtreegrid\n'
s += ' USG_DATA_PREFIX = qtg\n'
s += 'END GRID_TO_USGDATA\n'
s += '\n'
s += 'BEGIN GRID_TO_VTKFILE grid_to_vtk\n'
s += ' GRID = quadtreegrid\n'
s += ' VTKFILE = qtg\n'
s += ' SHARE_VERTEX = False\n'
s += 'END GRID_TO_VTKFILE\n'
s += '\n'
s += 'BEGIN GRID_TO_VTKFILE grid_to_vtk_sv\n'
s += ' GRID = quadtreegrid\n'
s += ' VTKFILE = qtg_sv\n'
s += ' SHARE_VERTEX = True\n'
s += 'END GRID_TO_VTKFILE\n'
return s
def _mkvertdict(self):
"""
Create the self._vertdict dictionary that maps the nodenumber to
the vertices
Returns
-------
None
"""
# ensure there are active leaf cells from gridgen
fname = os.path.join(self.model_ws, 'qtg.nod')
if not os.path.isfile(fname):
raise Exception('File {} should have been created by gridgen.'.
format(fname))
f = open(fname, 'r')
line = f.readline()
ll = line.strip().split()
nodes = int(ll[0])
if nodes == 0:
raise Exception('Gridgen resulted in no active cells.')
# ensure shape file was created by gridgen
fname = os.path.join(self.model_ws, 'qtgrid.shp')
assert os.path.isfile(fname), 'gridgen shape file does not exist'
# read vertices from shapefile
sf = shapefile.Reader(fname)
shapes = sf.shapes()
fields = sf.fields
attributes = [l[0] for l in fields[1:]]
records = sf.records()
idx = attributes.index('nodenumber')
for i in range(len(shapes)):
nodenumber = int(records[i][idx]) - 1
self._vertdict[nodenumber] = shapes[i].points
return
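# A minimal end-to-end sketch of the workflow implemented above (model
# names and dimensions are assumptions, not part of this module):
#
# import flopy
# ml = flopy.modflow.Modflow(model_ws='ws')
# dis = flopy.modflow.ModflowDis(ml, nlay=1, nrow=10, ncol=10,
#                                delr=100., delc=100., top=10., botm=0.)
# g = Gridgen(dis, model_ws='ws')
# g.add_refinement_features([(500., 500.)], 'point', level=2, layers=[0])
# g.build()
# disu = g.get_disu(ml)  # ModflowDisU for a MODFLOW-USG model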
|
mrustl/flopy
|
flopy/utils/gridgen.py
|
Python
|
bsd-3-clause
| 32,177
|
[
"VTK"
] |
907158cc7e555897747df213fd41f7c343891943801c05606f718d539de69d65
|
#!/usr/bin/env python
""" update local cfg
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
from diraccfg import CFG
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
Script.setUsageMessage("\n".join([__doc__.split("\n")[1], "Usage:", " %s [options]" % Script.scriptName]))
Script.registerSwitch("F:", "file=", "set the cfg file to update.")
Script.registerSwitch("V:", "vo=", "set the VO.")
Script.registerSwitch("S:", "setup=", "set the software dist module to update.")
Script.registerSwitch("D:", "softwareDistModule=", "set the software dist module to update.")
Script.parseCommandLine()
args = Script.getPositionalArgs()
from DIRAC import gConfig
cFile = ""
sMod = ""
vo = ""
setup = ""
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ("F", "file"):
cFile = unprocSw[1]
if unprocSw[0] in ("V", "vo"):
vo = unprocSw[1]
if unprocSw[0] in ("D", "softwareDistModule"):
sMod = unprocSw[1]
if unprocSw[0] in ("S", "setup"):
setup = unprocSw[1]
localCfg = CFG()
if cFile:
localConfigFile = cFile
else:
print("WORKSPACE: %s" % os.path.expandvars("$WORKSPACE"))
if os.path.isfile(os.path.expandvars("$WORKSPACE") + "/PilotInstallDIR/etc/dirac.cfg"):
localConfigFile = os.path.expandvars("$WORKSPACE") + "/PilotInstallDIR/etc/dirac.cfg"
elif os.path.isfile(os.path.expandvars("$WORKSPACE") + "/ServerInstallDIR/etc/dirac.cfg"):
localConfigFile = os.path.expandvars("$WORKSPACE") + "/ServerInstallDIR/etc/dirac.cfg"
elif os.path.isfile("./etc/dirac.cfg"):
localConfigFile = "./etc/dirac.cfg"
else:
print("Local CFG file not found")
exit(2)
localCfg.loadFromFile(localConfigFile)
if not localCfg.isSection("/LocalSite"):
localCfg.createNewSection("/LocalSite")
localCfg.setOption("/LocalSite/CPUTimeLeft", 5000)
localCfg.setOption("/DIRAC/Security/UseServerCertificate", False)
if not sMod:
if not setup:
setup = gConfig.getValue("/DIRAC/Setup")
if not setup:
setup = "dirac-JenkinsSetup"
if not localCfg.isSection("/Operations"):
localCfg.createNewSection("/Operations")
if not localCfg.isSection("/Operations/%s" % setup):
localCfg.createNewSection("/Operations/%s" % setup)
localCfg.setOption("/Operations/%s/SoftwareDistModule" % setup, "")
localCfg.writeToFile(localConfigFile)
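# A minimal invocation sketch (paths and setup name are assumptions):
#
# python dirac-cfg-update.py -F ./etc/dirac.cfg -S dirac-JenkinsSetup
#
# This loads the given cfg, sets /LocalSite/CPUTimeLeft, disables the use
# of the server certificate and clears SoftwareDistModule for the setup.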
|
ic-hep/DIRAC
|
tests/Jenkins/dirac-cfg-update.py
|
Python
|
gpl-3.0
| 2,479
|
[
"DIRAC"
] |
1c06eb49f854be9fa64520614332a7ef04eed856d47e6f4383b3d7a515e476b3
|
# -*- coding: utf-8 -*-
"""
Data conversion utilities for igraph
=====================================
Convert cytoscape.js style graphs from/to igraph object.
http://igraph.org/python/
"""
import igraph as ig
DEF_SCALING = 100.0
def from_igraph(igraph_network, layout=None, scale=DEF_SCALING):
"""
Convert igraph object to cytoscape.js style graphs.
:param igraph_network: igraph object.
:param layout:
:param scale: This is the scale of graph. You can scale the original igraph size to Cytoscape size.
:return : cytoscape.js style graph object.
"""
new_graph = {}
network_data = {}
elements = {}
nodes = []
edges = []
# Convert network attributes
network_attr = igraph_network.attributes()
for key in network_attr:
network_data[key] = igraph_network[key]
# get network as a list of edges
edges_original = igraph_network.es
nodes_original = igraph_network.vs
node_attr = igraph_network.vs.attributes()
for idx, node in enumerate(nodes_original):
new_node = {}
data = {}
data['id'] = str(node.index)
data['name'] = str(node.index)
for key in node_attr:
data[key] = node[key]
new_node['data'] = data
if layout is not None:
position = {}
position['x'] = layout[idx][0] * scale
position['y'] = layout[idx][1] * scale
new_node['position'] = position
nodes.append(new_node)
# Add edges to the elements
edge_attr = igraph_network.es.attributes()
for edge in edges_original:
new_edge = {}
data = {}
data['source'] = str(edge.source)
data['target'] = str(edge.target)
for key in edge_attr:
data[key] = edge[key]
new_edge['data'] = data
edges.append(new_edge)
elements['nodes'] = nodes
elements['edges'] = edges
new_graph['elements'] = elements
new_graph['data'] = network_data
return new_graph
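# A minimal usage sketch (example graph; any igraph layout works):
#
# g = ig.Graph.Famous('Zachary')
# cyjs = from_igraph(g, layout=g.layout('kk'), scale=100.0)
# # cyjs['elements']['nodes'][0]['position'] holds the scaled x/y values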
def to_igraph(network):
"""
Convert cytoscape.js style graphs to igraph object.
:param network: the cytoscape.js style network.
:return: the igraph object.
"""
nodes = network['elements']['nodes']
edges = network['elements']['edges']
network_attr = network['data']
node_count = len(nodes)
edge_count = len(edges)
g = ig.Graph()
# Graph attributes
for key in network_attr.keys():
g[key] = network_attr[key]
g.add_vertices(node_count)
# Add node attributes
node_attributes = {}
node_id_dict = {}
for i, node in enumerate(nodes):
data = node['data']
for key in data.keys():
if key not in node_attributes:
node_attributes[key] = [None] * node_count
# Save index to map
if key == 'id':
node_id_dict[data[key]] = i
node_attributes[key][i] = data[key]
for key in node_attributes.keys():
g.vs[key] = node_attributes[key]
# Create edges
edge_tuples = []
edge_attributes = {}
for i, edge in enumerate(edges):
data = edge['data']
source = data['source']
target = data['target']
edge_tuple = (node_id_dict[source], node_id_dict[target])
edge_tuples.append(edge_tuple)
for key in data.keys():
if key not in edge_attributes:
edge_attributes[key] = [None] * edge_count
# Save index to map
edge_attributes[key][i] = data[key]
g.add_edges(edge_tuples)
# Assign edge attributes
for key in edge_attributes.keys():
if key == 'source' or key == 'target':
continue
else:
g.es[key] = edge_attributes[key]
return g
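# A minimal round-trip sketch: a graph converted with from_igraph can be
# restored with to_igraph, since node ids are preserved in the 'id' field.
#
# g2 = to_igraph(from_igraph(g))
# assert g2.vcount() == g.vcount() and g2.ecount() == g.ecount()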
|
idekerlab/cyrest-examples
|
notebooks/cookbook/Python-cookbook/util/util_igraph.py
|
Python
|
mit
| 3,777
|
[
"Cytoscape"
] |
120d003c34c8e1c379bbb5d2800497a830842c88c1323f8af823cf708bf18f95
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import nnabla as nn
import numpy as np
class PrettyPrinter():
"""
Pretty printer to print the graph structure used with the `visit` method of a Variable.
Attributes:
functions (list of dict): List of functions of which element is the dictionary.
The (key, value) pair is the (`name`, function name), (`inputs`, list of input variables),
and (`outputs`, list of output variables) of a function.
"""
def __init__(self, summary=False, hidden=False):
"""
Args:
summary (bool): Print statistics of the intermediate variables.
hidden (bool): Store the intermediate input and output variables if True.
"""
self._summary = summary
self._hidden = hidden
self.functions = []
def get_scope_name(self, x):
params = nn.get_parameters()
values = list(params.values())
keys = list(params.keys())
if x in values:
idx = values.index(x)
scope = "/".join(keys[idx].split("/")[:-1])
else:
scope = None
return scope
def __call__(self, f):
scope = self.get_scope_name(f.inputs[1]) if len(f.inputs) > 1 else None
name = "{}/{}({})".format(scope, f.name, f.info.type_name) if scope else \
"{}({})".format(f.name, f.info.type_name)
print(name)
print("\tDepth = {}".format(f.rank))
print("\tArgs:", ["{}={}".format(k, v)
for k, v in f.info.args.items()])
print("\tInputs:", [i.shape for i in f.inputs])
print("\tOutputs:", [o.shape for o in f.outputs])
print("\tBackward Inputs:", [i.need_grad for i in f.inputs])
if self._summary:
print("\tInput Data:")
print("\t\tMed: ", [np.median(i.d) for i in f.inputs])
print("\t\tAve: ", [np.mean(i.d) for i in f.inputs])
print("\t\tStd: ", [np.std(i.d) for i in f.inputs])
print("\t\tMin: ", [np.min(i.d) for i in f.inputs])
print("\t\tMax: ", [np.max(i.d) for i in f.inputs])
print("\tOutput Data:")
print("\t\tMed: ", [np.median(i.d) for i in f.outputs])
print("\t\tAve: ", [np.mean(i.d) for i in f.outputs])
print("\t\tStd: ", [np.std(i.d) for i in f.outputs])
print("\t\tMin: ", [np.min(i.d) for i in f.outputs])
print("\t\tMax: ", [np.max(i.d) for i in f.outputs])
print("\tInput Grads:")
print("\t\tMed: ", [np.median(i.g) for i in f.inputs])
print("\t\tAve: ", [np.mean(i.g) for i in f.inputs])
print("\t\tStd: ", [np.std(i.g) for i in f.inputs])
print("\t\tMin: ", [np.min(i.g) for i in f.inputs])
print("\t\tMax: ", [np.max(i.g) for i in f.inputs])
print("\tOutput Grads:")
print("\t\tMed: ", [np.median(i.g) for i in f.outputs])
print("\t\tAve: ", [np.mean(i.g) for i in f.outputs])
print("\t\tStd: ", [np.std(i.g) for i in f.outputs])
print("\t\tMin: ", [np.min(i.g) for i in f.outputs])
print("\t\tMax: ", [np.max(i.g) for i in f.outputs])
if self._hidden:
h = dict(name=name,
inputs=[i for i in f.inputs],
outputs=[o for o in f.outputs])
self.functions.append(h)
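# A minimal direct-usage sketch (``pred`` is an assumed root variable of a
# built nnabla graph):
#
# printer = PrettyPrinter(summary=False, hidden=True)
# pred.visit(printer)
# first = printer.functions[0]  # dict with 'name', 'inputs', 'outputs'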
def pprint(v, forward=False, backward=False, summary=False, hidden=False, printer=False):
"""
Pretty print information of a graph from a root variable `v`.
Note that in order to print the summary statistics, this function retains
(i.e., does not reuse) the intermediate buffers of the computation graph,
which increases the memory usage if either forward or backward is True.
Args:
v (:obj:`nnabla.Variable`): Root variable.
forward (bool): Call the forward method of a variable `v`.
backward (bool): Call the backward method of a variable `v`.
summary (bool): Print statistics of the intermediate variables.
hidden (bool): Store the intermediate input and output variables if True.
printer (bool): Return the printer object if True.
Example:
.. code-block:: python
pred = Model(...)
from nnabla.utils.inspection import pprint
pprint(pred, summary=True, forward=True, backward=True)
"""
if forward:
v.forward()
if backward:
v.backward()
pprinter = PrettyPrinter(summary, hidden)
v.visit(pprinter)
return pprinter if printer else None
|
sony/nnabla
|
python/src/nnabla/utils/inspection/pretty_print.py
|
Python
|
apache-2.0
| 5,235
|
[
"VisIt"
] |
f2788e67f88acf54e4b23576c10a64a857b98c4b1a1328b4f742333b863df311
|
"""
Fixtures for testing notifications
"""
from copy import deepcopy
from octopus.lib import dates, paths
import os
RESOURCES = paths.rel2abs(__file__, "..", "resources")
"""Path to the test resources directory, calculated relative to this file"""
class NotificationFactory(object):
"""
Class which provides access to the various fixtures used for testing the notifications
"""
@classmethod
def notification_list(cls, since, page=1, pageSize=10, count=1, ids=None, analysis_dates=None):
"""
Example notification list
:param since: since date for list
:param page: page number of list
:param pageSize: number of results in list
:param count: total number of results
:param ids: ids of notifications to be included
:param analysis_dates: analysis dates of notifications; should be the same length as ids, as the two lists are paired element-wise
:return:
"""
nl = deepcopy(NOTIFICATION_LIST)
nl["since"] = since
nl["page"] = page
nl["pageSize"] = pageSize
nl["total"]= count
nl["timestamp"] = dates.now()
this_page = pageSize if page * pageSize <= count else count - ((page - 1) * pageSize)
for i in range(this_page):
note = deepcopy(OUTGOING)
if ids is not None:
note["id"] = ids[i]
if analysis_dates is not None:
note["analysis_date"] = analysis_dates[i]
nl["notifications"].append(note)
return nl
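# Worked example of the pagination above: with count=25, pageSize=10 and
# page=3, page * pageSize = 30 > 25, so this_page = 25 - 2 * 10 = 5 and
# the last page carries five notifications.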
@classmethod
def error_response(cls):
"""
JPER API error response
:return: error response
"""
return deepcopy(LIST_ERROR)
@classmethod
def unrouted_notification(cls):
"""
Example unrouted notification
:return:
"""
return deepcopy(BASE_NOTIFICATION)
@classmethod
def outgoing_notification(cls):
"""
Example outgoing notification
:return:
"""
return deepcopy(OUTGOING)
@classmethod
def special_character_notification(cls):
"""
Example special character notification
:return:
"""
return deepcopy(SPECIAL_CHARACTER)
@classmethod
def example_package_path(cls):
"""
Path to binary file which can be used for testing
:return:
"""
return os.path.join(RESOURCES, "example.zip")
LIST_ERROR = {
"error" : "request failed"
}
"""Example API error"""
BASE_NOTIFICATION = {
"id" : "1234567890",
"created_date" : "2015-02-02T00:00:00Z",
"event" : "publication",
"provider" : {
"id" : "pub1",
"agent" : "test/0.1",
"ref" : "xyz",
"route" : "api"
},
"content" : {
"packaging_format" : "https://pubrouter.jisc.ac.uk/FilesAndJATS",
"store_id" : "abc"
},
"links" : [
{
"type" : "splash",
"format" : "text/html",
"access" : "public",
"url" : "http://example.com/article/1"
},
{
"type" : "fulltext",
"format" : "application/pdf",
"access" : "public",
"url" : "http://example.com/article/1/pdf"
}
],
"embargo" : {
"end" : "2016-01-01T00:00:00Z",
"start" : "2015-01-01T00:00:00Z",
"duration" : 12
},
"metadata" : {
"title" : "Test Article",
"version" : "AAM",
"publisher" : "Premier Publisher",
"source" : {
"name" : "Journal of Important Things",
"identifier" : [
{"type" : "issn", "id" : "1234-5678" },
{"type" : "eissn", "id" : "1234-5678" },
{"type" : "pissn", "id" : "9876-5432" },
{"type" : "doi", "id" : "10.pp/jit" }
]
},
"identifier" : [
{"type" : "doi", "id" : "10.pp/jit.1" }
],
"type" : "article",
"author" : [
{
"name" : "Richard Jones",
"identifier" : [
{"type" : "orcid", "id" : "aaaa-0000-1111-bbbb"},
{"type" : "email", "id" : "richard@example.com"},
],
"affiliation" : "Cottage Labs, HP3 9AA"
},
{
"name" : "Mark MacGillivray",
"identifier" : [
{"type" : "orcid", "id" : "dddd-2222-3333-cccc"},
{"type" : "email", "id" : "mark@example.com"},
],
"affiliation" : "Cottage Labs, EH9 5TP"
}
],
"language" : "eng",
"publication_date" : "2015-01-01T00:00:00Z",
"date_accepted" : "2014-09-01T00:00:00Z",
"date_submitted" : "2014-07-03T00:00:00Z",
"license_ref" : {
"title" : "CC BY",
"type" : "CC BY",
"url" : "http://creativecommons.org/cc-by",
"version" : "4.0",
},
"project" : [
{
"name" : "BBSRC",
"identifier" : [
{"type" : "ringold", "id" : "bbsrcid"}
],
"grant_number" : "BB/34/juwef"
}
],
"subject" : ["science", "technology", "arts", "medicine"]
}
}
"""Example base notification"""
NOTIFICATION_LIST = {
"since" : None,
"page" : None,
"pageSize" : None,
"timestamp" : None,
"total" : 1,
"notifications" : []
}
"""structure for notification list"""
OUTGOING = {
"id" : "1234567890",
"created_date" : "2015-02-02T00:00:00Z",
"analysis_date" : "2015-02-02T00:00:00Z",
"event" : "submission",
"content" : {
"packaging_format" : "https://pubrouter.jisc.ac.uk/FilesAndJATS",
},
"links" : [
{
"type" : "splash",
"format" : "text/html",
"url" : "http://router.jisc.ac.uk/api/v1/notification/1234567890/content/1"
},
{
"type" : "fulltext",
"format" : "application/pdf",
"url" : "http://router.jisc.ac.uk/api/v1/notification/1234567890/content/2"
},
{
"type" : "package",
"format" : "application/zip",
"url" : "http://router.jisc.ac.uk/api/v1/notification/1234567890/content/SimpleZip",
"packaging" : "http://purl.org/net/sword/package/SimpleZip"
},
{
"type" : "package",
"format" : "application/zip",
"url" : "http://router.jisc.ac.uk/api/v1/notification/1234567890/content",
"packaging" : "https://pubrouter.jisc.ac.uk/FilesAndJATS"
}
],
"embargo" : {
"end" : "2016-01-01T00:00:00Z",
"start" : "2015-01-01T00:00:00Z",
"duration" : 12
},
"metadata" : {
"title" : "Test Article",
"version" : "AAM",
"publisher" : "Premier Publisher",
"source" : {
"name" : "Journal of Important Things",
"identifier" : [
{"type" : "issn", "id" : "1234-5678" },
{"type" : "eissn", "id" : "1234-5678" },
{"type" : "pissn", "id" : "9876-5432" },
{"type" : "doi", "id" : "10.pp/jit" }
]
},
"identifier" : [
{"type" : "doi", "id" : "10.pp/jit.1" }
],
"type" : "article",
"author" : [
{
"name" : "Richard Jones",
"identifier" : [
{"type" : "orcid", "id" : "aaaa-0000-1111-bbbb"},
{"type" : "email", "id" : "richard@example.com"},
],
"affiliation" : "Cottage Labs"
},
{
"name" : "Mark MacGillivray",
"identifier" : [
{"type" : "orcid", "id" : "dddd-2222-3333-cccc"},
{"type" : "email", "id" : "mark@example.com"},
],
"affiliation" : "Cottage Labs"
}
],
"language" : "eng",
"publication_date" : "2015-01-01T00:00:00Z",
"date_accepted" : "2014-09-01T00:00:00Z",
"date_submitted" : "2014-07-03T00:00:00Z",
"license_ref" : {
"title" : "CC BY",
"type" : "CC BY",
"url" : "http://creativecommons.org/cc-by",
"version" : "4.0",
},
"project" : [
{
"name" : "BBSRC",
"identifier" : [
{"type" : "ringold", "id" : "bbsrcid"}
],
"grant_number" : "BB/34/juwef"
}
],
"subject" : ["science", "technology", "arts", "medicine"]
}
}
"""example outgoing notification"""
SPECIAL_CHARACTER = {
"embargo": {
"duration": 0
},
"links": [{
"url": "https://pubrouter.jisc.ac.uk/api/v1/notification/4e8f4bef41254539a28e072c1e85d9a2/proxy/d0ec9aa8125f4e81aede7dfeaa5bc9e2",
"type": "fulltext",
"format": "text/html"
}],
"analysis_date": "2016-02-23T22:41:47Z",
"event": "acceptance",
"created_date": "2016-02-23T22:35:46Z",
"id": "4e8f4bef41254539a28e072c1e85d9a2",
"metadata": {
"language": "eng",
"title": "Relationship between maxillary central incisor proportions and\u00a0facial proportions.".decode("unicode_escape"),
"author": [{
"affiliation": "Senior Specialty Registrar, Orthodontics, Guy's Hospital, London, UK.",
"identifier": [{
"type": "ORCID",
"id": "0000-0003-2731-183X"
}],
"name": "Radia S"
},
{
"affiliation": "Professor, Biostatistics, University of Bristol, Bristol, UK.",
"name": "Sherriff M"
},
{
"affiliation": "Professor and Head, Department of Orthodontics, King's College University, London, UK.",
"name": "McDonald F"
},
{
"affiliation": "Consultant Orthodontist, Kingston and St George's Hospitals and Medical School, London, UK. Electronic address: farhad.naini@yahoo.co.uk.",
"name": "Naini FB"
}],
"source": {
"identifier": [{
"type": "issn",
"id": "0022-3913"
},
{
"type": "eissn",
"id": "1097-6841"
}],
"name": "The Journal of prosthetic dentistry"
},
"publication_date": "2016-01-12T00:00:00Z",
"identifier": [{
"type": "10.1016/j.prosdent.2015.10.019",
"id": "doi"
},
{
"type": "26794701",
"id": "pmid"
}],
"type": "Journal Article"
}
}
|
JiscPER/jper-sword-out
|
service/tests/fixtures/notifications.py
|
Python
|
apache-2.0
| 11,432
|
[
"Octopus"
] |
c184ff7c40f6fbdd01ba4cba06bda52a31c5789b2ad810f912c760d5620e1cf1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
What it does
============
B{This is the 7th step of ZIBgridfree.}
This tool will perform a direct reweighting of the sampling nodes. It should be run on a pool of converged nodes. From the reweighted partial distributions that were sampled in the individual nodes one can reconstruct the overall Boltzmann distribution.
B{The next step is L{zgf_analyze}.}
How it works
============
At the command line, type::
$ zgf_reweight [options]
Direct reweighting strategies
=============================
Currently, three different direct reweighting approaches are implemented:
- Direct free energy reweighting: see Klimm, Bujotzek, Weber 2011
- Entropy reweighting: see Klimm, Bujotzek, Weber 2011
- Presampling analysis reweighting: see formula 18 in Fackeldey, Durmaz, Weber 2011
B{Entropy reweighting "entropy"}
Choice of the evaluation region: The size of the evaluation region is chosen to be as large as the variance of the internal coordinate time series (obtained from the sampling trajectory), averaged over all nodes.
Choice of reference points by energy region: Instead of specifying a fixed number of reference points, we follow the approach of M. Weber, K. Andrae: A simple method for the estimation of entropy differences. MATCH Comm. Math. Comp. Chem. 63(2):319-332, 2010. This means we are picking a dynamic number of reference points by declaring all the sampling points within a certain energy region (which we choose as the energy standard deviation) around the mean potential energy of the system as reference points.
Finding of near points: Our measure of conformational entropy is based on an estimate of the sampling density. For this purpose, we look for sampling points adjacent to our reference points and denote them as 'near points'. As each reference point counts as its own near point, the number of near points can never be zero.
Calculation of entropy and free energy: The number of (inverse) near points enters directly into the calculation of entropy for the corresponding node. Finally, thermodynamic weights are derived from the free energy differences. Some useful information regarding reference points, their adjacent near points and energy averages will be stored in the reweighting log file.
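Concretely, the implementation below computes S = R * ln( median( n_frames / n_nearpoints ) ) with the gas constant R = N_A*k_B, and the free energy as A = mean_V - T*S; the thermodynamic weights then follow as Boltzmann factors exp(-beta*dA) of the free energy differences between nodes.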
Frame weights
=============
As in Gromacs we use simple harmonic restraint potentials to approximate the original radial basis functions used in ZIBgridfree, we have to perform a frame reweighting of the sampling trajectories afterwards. The frame weight of each individual frame $q$ belonging to node $i$ is calculated as:
\[ \mathtt{frame\_weight}_i(q)=\\frac{\phi_i(q)}{\exp(-\\beta \cdot U_{res}(q))}, \]
where $U_{res}(q)$ is the GROMACS restraint potential in frame $q$. Frame weights should lie between zero and one; values slightly above one are feasible. Note that frame weights are not normalized to one.
Overweight frames are possible if $\phi_i(q)$ is high (meaning that $q$ is well within its native basis function) while the penalty potential $U_{res}(q)$ is high as well. In that case, $q$ is punished wrongly, as $q$ should only be punished by the penalty potential if it attempts to leave its native basis function.
When overweight frames occur, this probably means that your approximation of the $\phi$ function for the corresponding node is bad. You can check this by using L{zgf_browser}. If the penalty potential kicks in where $\phi$ is still good, you have got a bad approximation of the $\phi$ function. Overweight frame weights will trigger a WARNING. Furthermore, any occurrence of overweight frame weights will be stored in the reweighting log file.
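For illustration: with $\phi_i(q) = 0.3$ and $\\beta \cdot U_{res}(q) = 1.0$, the frame weight is 0.3/exp(-1.0) ≈ 0.82, which is fine; with $\phi_i(q) = 0.9$ at the same restraint energy it becomes 0.9/exp(-1.0) ≈ 2.45, i.e. an overweight frame.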
Choice of energy observables for reweighting
============================================
You can pick from various options. You can decide if you want to use observables from the standard run (as stored in 'ener.edr') or from a rerun (as stored in 'rerun.edr') that you did with L{zgf_rerun}. You can also read bonded and non-bonded energy observables from different edr-files. If you are not happy with the standard choice of energy observables, you can provide a file with custom observables (non-bonded only).
Check restraint energy
======================
This option is mainly for debugging. It checks whether ZIBMolPy internally calculates the same restraint energies as Gromacs (as stored in the edr-file of the run). You can also compare ZIBMolPy and Gromacs restraint energies visually by using the FrameWeightPlot in L{zgf_browser}.
"""
from ZIBMolPy.constants import AVOGADRO, BOLTZMANN
from ZIBMolPy.restraint import DihedralRestraint, DistanceRestraint
from ZIBMolPy.ui import Option, OptionsList
from ZIBMolPy.phi import get_phi, get_phi_potential
from ZIBMolPy.pool import Pool
import zgf_cleanup
from subprocess import Popen, PIPE, call, check_call
from datetime import datetime
from tempfile import mktemp
from warnings import warn
import numpy as np
import sys
import os
from shutil import copy
from os import path
CRITICAL_FRAME_WEIGHT = 5.0
options_desc = OptionsList([
Option("s", "seq", "bool", "Suppress MPI", default=False),
Option("p", "np", "int", "Number of processors to be used for MPI", default=4, min_value=1),
Option("c", "ignore-convergence", "bool", "reweight despite not-converged", default=False),
Option("f", "ignore-failed", "bool", "reweight and ignore mdrun-failed nodes", default=False),
Option("m", "method", "choice", "reweighting method", choices=("entropy", "direct", "presampling")),
Option("z", "reminimize", "bool", "reminimize presampling frames", default=False),
Option("b", "e-bonded", "choice", "bonded energy type", choices=("run_standard_potential", "run_standard_bondedterms", "rerun_standard_potential", "rerun_standard_bondedterms", "none")),
Option("n", "e-nonbonded", "choice", "nonbonded energy type", choices=("none", "run_standard_nonbondedterms", "run_moi", "run_moi_sol_sr", "run_moi_sol_lr", "run_custom", "rerun_standard_nonbondedterms", "run_moi_sol_interact", "rerun_moi_sol_interact", "run_moi_sol_interact_withLR", "rerun_moi_sol_interact_withLR", "rerun_moi", "rerun_moi_sol_sr", "rerun_moi_sol_lr", "rerun_custom")),
Option("e", "custom-energy", "file", extension="txt", default="custom_energy.txt"),
Option("t", "presamp-temp", "float", "presampling temp", default=1000), #TODO maybe drop this and ask user instead... method has to be reworked anyway
Option("r", "save-refpoints", "bool", "save refpoints in observables", default=False),
Option("R", "check-restraint", "bool", "check if ZIBMolPy calculates the same restraint energy as Gromacs", default=False),
])
sys.modules[__name__].__doc__ += options_desc.epytext() # for epydoc
def is_applicable():
pool = Pool()
return( len(pool) > 1 and len(pool.where("isa_partition and state in ('converged','not-converged','mdrun-failed')")) == len(pool.where("isa_partition")) )
#===============================================================================
def main():
options = options_desc.parse_args(sys.argv)[0]
zgf_cleanup.main()
pool = Pool()
not_reweightable = "isa_partition and state not in ('converged'"
if(options.ignore_convergence):
not_reweightable += ",'not-converged'"
if(options.ignore_failed):
not_reweightable += ",'mdrun-failed'"
not_reweightable += ")"
if pool.where(not_reweightable):
print "Pool can not be reweighted due to the following nodes:"
for bad_guy in pool.where(not_reweightable):
print "Node %s with state %s."%(bad_guy.name, bad_guy.state)
sys.exit("Aborting.")
active_nodes = pool.where("isa_partition and state != 'mdrun-failed'")
assert(len(active_nodes) == len(active_nodes.multilock())) # make sure we lock ALL nodes
if(options.check_restraint):
for n in active_nodes:
check_restraint_energy(n)
if(options.method == "direct"):
reweight_direct(active_nodes, options)
elif(options.method == "entropy"):
reweight_entropy(active_nodes, options)
elif(options.method == "presampling"):
reweight_presampling(active_nodes, options)
else:
raise(Exception("Method unkown: "+options.method))
weight_sum = np.sum([n.tmp['weight'] for n in active_nodes])
print "Thermodynamic weights calculated by method '%s':"%options.method
for n in active_nodes:
n.obs.weight_direct = n.tmp['weight'] / weight_sum
if(options.method == "direct"):
print(" %s with mean_V: %f [kJ/mol], %d refpoints and weight: %f" % (n.name, n.obs.mean_V, n.tmp['n_refpoints'], n.obs.weight_direct))
else:
print(" %s with A: %f [kJ/mol] and weight: %f" % (n.name, n.obs.A, n.obs.weight_direct))
print "The above weighting uses bonded energies='%s' and nonbonded energies='%s'."%(options.e_bonded, options.e_nonbonded)
for n in active_nodes:
n.save()
active_nodes.unlock()
#===============================================================================
def reweight_direct(nodes, options):
print "Direct free energy reweighting: see Klimm, Bujotzek, Weber 2011"
custom_energy_terms = None
if(options.e_nonbonded in ("run_custom", "rerun_custom")):
assert(path.exists(options.custom_energy))
custom_energy_terms = [entry.strip() for entry in open(options.custom_energy).readlines() if entry != "\n"]
beta = nodes[0].pool.thermo_beta
for n in nodes:
# get potential V and subtract penalty potential
energies = load_energy(n, options.e_bonded, options.e_nonbonded, custom_energy_terms)
frame_weights = n.frameweights
phi_weighted_energies = energies + get_phi_potential(n.trajectory, n)
# define evaluation region where sampling is rather dense, e. g. around mean potential energy with standard deviation of potential energy
n.obs.mean_V = np.average(phi_weighted_energies, weights=frame_weights)
n.obs.std_V = np.sqrt(np.average(np.square(phi_weighted_energies - n.obs.mean_V), weights=frame_weights))
n.tmp['weight'] = 0.0
# new part
mean_mean_V = np.mean([n.obs.mean_V for n in nodes])
std_mean_V = np.sqrt(np.mean([np.square(n.obs.mean_V - mean_mean_V) for n in nodes]))
print "### original std_mean_V: %f"%std_mean_V
print "### mean over obs.std_V: %f"%np.mean([n.obs.std_V for n in nodes]) #TODO decide upon one way to calculate std_mean_V
energy_region = std_mean_V
for n in nodes:
refpoints = np.where(np.abs(phi_weighted_energies - n.obs.mean_V) < energy_region)[0]
n.tmp['n_refpoints'] = len(refpoints)
log = open(n.reweighting_log_fn, "a") # using separate log-file
def output(message):
print(message)
log.write(message+"\n")
output("======= Starting node reweighting %s"%datetime.now())
output(" unweighted mean V: %s [kJ/mol], without penalty potential" % np.mean(energies))
output(" phi-weighted mean V: %s [kJ/mol], without penalty potential" % np.mean(phi_weighted_energies))
output(" weighted mean V: %f [kJ/mol]" % n.obs.mean_V)
output(" energy region (=tenth of weighted V standard deviaton): %f [kJ/mol]" % energy_region)
output(" number of refpoints: %d" % n.tmp['n_refpoints'])
# calculate weights with direct ansatz
for ref in refpoints:
n.tmp['weight'] += np.exp(beta*phi_weighted_energies[ref])
n.tmp['weight'] = float(n.trajectory.n_frames) / float(n.tmp['weight'])
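# i.e. weight = n_frames / sum_over_refpoints( exp(+beta*V) ); a direct
# estimate of the node's Boltzmann weight from the densely sampled energy
# region around mean_V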
n.obs.S = 0.0
n.obs.A = 0.0
if(options.save_refpoints):
n.obs.refpoints = refpoints
log.close()
#===============================================================================
def reweight_entropy(nodes, options):
print "Entropy reweighting: see Klimm, Bujotzek, Weber 2011"
custom_energy_terms = None
if(options.e_nonbonded in ("run_custom", "rerun_custom")):
assert(path.exists(options.custom_energy))
custom_energy_terms = [entry.strip() for entry in open(options.custom_energy).readlines() if entry != "\n"]
# calculate variance of internal coordinates
conjugate_var = np.mean([n.trajectory.merged_var_weighted() for n in nodes]) # this is our evaluation region
# find refpoints and calculate nearpoints
for n in nodes:
log = open(n.reweighting_log_fn, "a") # using separate log-file
def output(message):
print(message)
log.write(message+"\n")
output("======= Starting node reweighting %s"%datetime.now())
# get potential V and subtract penalty potential
energies = load_energy(n, options.e_bonded, options.e_nonbonded, custom_energy_terms)
frame_weights = n.frameweights
phi_weighted_energies = energies + get_phi_potential(n.trajectory, n)
# calculate mean V
n.obs.mean_V = np.average(phi_weighted_energies, weights=frame_weights)
n.tmp['weight'] = 1.0
n.obs.std_V = np.sqrt(np.average(np.square(phi_weighted_energies - n.obs.mean_V), weights=frame_weights))
# every frame within this region is considered refpoint
energy_region = n.obs.std_V
refpoints = np.where(np.abs(phi_weighted_energies - n.obs.mean_V) < energy_region)[0]
output(" unweighted mean V: %s [kJ/mol], without penalty potential" % np.mean(energies))
output(" phi-weighted mean V: %s [kJ/mol], without penalty potential" % np.mean(phi_weighted_energies))
output(" weighted mean V: %f [kJ/mol]" % n.obs.mean_V)
output(" energy region (=weighted V standard deviation): %f [kJ/mol]" % energy_region)
output(" evaluation region (=conjugate variance): %f" % conjugate_var)
output(" number of refpoints: %d" % len(refpoints))
if( len(refpoints) == 0 ):
raise(Exception("Zero refpoints for "+n.name+" ["+n.trr_fn+"]."))
norm_inv_nearpoints = []
for ref in refpoints: # for each refpoint count nearpoints
diffs = (n.trajectory - n.trajectory.getframe(ref)).norm2() #TODO -> needs Marcus-check -> Do we have to consider Frame-weights here?
nearpoints = np.sum(diffs < conjugate_var)
#output(" refpoint %d with energy %f has %d nearpoints" % (ref, phi_weighted_energies[ref], nearpoints))
if(nearpoints == 1):
output("WARNING: No nearpoints found for refpoint %d! (%s)" % (ref, n.name))
norm_inv_nearpoints.append( float(n.trajectory.n_frames)/float(nearpoints) ) # new calculation formula (see wiki), +1 is implicit as refpoint counts as nearpoint
n.tmp['medi_inv_nearpoints'] = np.median(norm_inv_nearpoints)
n.obs.S = AVOGADRO*BOLTZMANN*np.log(n.tmp['medi_inv_nearpoints']) # [kJ/mol*K]
n.obs.A = n.obs.mean_V - nodes[0].pool.temperature*n.obs.S # [kJ/mol]
if(options.save_refpoints):
n.obs.refpoints = refpoints
log.close()
nodes.sort(key = lambda n: n.obs.A) # sort in ascending order by free energy values
for (n1, n2) in zip(nodes[1:], nodes[:-1]): # calculate and normalize weights
n1.tmp['weight'] = np.exp(-nodes[0].pool.thermo_beta*( n1.obs.A - n2.obs.A )) * n2.tmp['weight']
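# note: all weights start at 1.0 and the nodes are sorted by ascending A, so
# the recursion telescopes to weight_i = exp(-thermo_beta*(A_i - A_min)),
# i.e. a Boltzmann factor relative to the lowest-free-energy node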
#===============================================================================
def reweight_presampling(nodes, options):
print "Presampling analysis reweighting: see formula 18 in Fackeldey, Durmaz, Weber 2011"
custom_energy_terms = None
if(options.e_nonbonded in ("run_custom", "rerun_custom")):
assert(path.exists(options.custom_energy))
custom_energy_terms = [entry.strip() for entry in open(options.custom_energy).readlines() if entry != "\n"]
root = nodes[0].pool.root
# presampling data
presampling_internals = root.trajectory
# presampling and sampling beta
beta_samp = root.pool.thermo_beta
beta_presamp = 1/(options.presamp_temp*BOLTZMANN*AVOGADRO)
# Calculating energies of all presampling frames
cmd0 = ["grompp"]
cmd0 += ["-f", "../../"+root.pool.mdp_fn]
cmd0 += ["-n", "../../"+root.pool.ndx_fn]
cmd0 += ["-c", "../../"+root.pdb_fn]
cmd0 += ["-p", "../../"+root.pool.top_fn]
cmd0 += ["-o", "../../"+root.dir+"/run_temp.tpr"]
print("Calling: %s"%" ".join(cmd0))
p = Popen(cmd0, cwd=root.dir)
retcode = p.wait()
assert(retcode == 0) # grompp should never fail
os.rename(root.dir+"/run_temp.tpr",root.tpr_fn)
# rerun mdrun
cmd = ["mdrun"]
cmd += ["-s", "../../" + root.tpr_fn]
cmd += ["-rerun", "../../" + root.trr_fn]
p = Popen(cmd, cwd=root.dir)
assert(p.wait() == 0)
# remove unnecessary files
os.remove(root.dir + "/traj.trr")
# extract potential energy V of presampling frames
energies = load_energy(root, options.e_bonded, options.e_nonbonded, custom_energy_terms)
# Beginning minimizations starting from every presampling frame
mins_dir = root.dir + "/mins"
if not os.path.isdir(mins_dir):
os.mkdir(mins_dir)
copy (root.pool.mdp_fn, mins_dir + "/min.mdp")
# change configuration to minimization
with open(mins_dir + "/min.mdp", "r+") as f:
c = f.read()
pos = c.find("integrator")
assert (pos != -1) # integrator should be defined
c = c[:pos] + ";" +c[pos:] + "\nintegrator = steep\nnstcgsteep = 10"
f.seek(0)
f.truncate()
f.write(c)
# loading times for starting a minimization from a certain frame in the trajectory
cmd2 = ["g_energy", "-dp", "-f", "ener.edr"]
print "Loading times of the presampling frames"
p = Popen (cmd2, cwd=root.dir, stdin=PIPE)
p.communicate("1\n")
assert (p.wait() == 0)
times = np.loadtxt(root.dir + "/energy.xvg", comments="@", skiprows=10, usecols=[0])
phi_mat = get_phi_mat(presampling_internals, nodes)
presamp_partition = np.argmax(phi_mat, axis=1)
# grompp command for every frame
cmd3 = ["grompp", "-f", "min.mdp", "-c", "../../../" + root.pdb_fn]
cmd3 += ["-t", "../../../" + root.trr_fn]
cmd3 += ["-n", "../../../" + root.pool.ndx_fn]
if (not hasattr(root, "reweight_minimized") or options.reminimize):
print "Running grompp to prepare for the minimizations"
print "Running mdrun for every presampling frame"
for i in xrange(times.size):
cmds = cmd3 + ["-p", "../../../" + nodes[presamp_partition[i]].top_fn]
cmds += ["-time", str(times[i])]
cmds += ["-o", "run" + str(i) + ".tpr"]
cmds += ["-po", "mdout" + str(i) + ".mdp"]
p = Popen (cmds, cwd = mins_dir)
assert(p.wait() == 0)
os.remove(mins_dir + "/mdout" + str(i) + ".mdp")
# minimization from all frames
cmd4 =["mdrun"]
cmd4 += ["-s", "run" + str(i) + ".tpr"]
cmd4 += ["-g", "md" + str(i) + ".log"]
cmd4 += ["-e", "ener" + str(i) + ".edr"]
cmd4 += ["-o", "traj" + str(i) + ".trr"]
cmd4 += ["-c", "confout" + str(i) + ".gro"]
# use mpiexec and mdrun_mpi if available
if(not options.seq and call(["which","mpiexec"])==0):
if(call(["which","mdrun_mpi"])==0):
cmd4[0] = "mdrun_mpi"
cmd4 = ["mpiexec", "-np", str(options.np)] + cmd4
#http://stackoverflow.com/questions/4554767/terminating-subprocess-in-python
#alternative
#p = Popen(...)
#pp = psutil.Process(p.pid)
#for child in pp.get_children():
# child.send_signal(signal.SIGINT)
#ensure, that childprocess dies when parent dies. Alternative: write own signal-handler e.g for atexit-module
#http://stackoverflow.com/questions/1884941/killing-the-child-processes-with-the-parent-process
implant_bomb = None
try:
import ctypes
libc = ctypes.CDLL('libc.so.6')
PR_SET_PDEATHSIG = 1; TERM = 15
implant_bomb = lambda: libc.prctl(PR_SET_PDEATHSIG, TERM)
except:
warn("Child process might live on when parent gets terminated (feature requires python 2.6).")
print("Calling: %s"%" ".join(cmd4))
check_call(cmd4, cwd=mins_dir, preexec_fn=implant_bomb)
os.remove(mins_dir + "/traj" + str(i) + ".trr")
os.remove(mins_dir + "/confout" + str(i) + ".gro")
os.remove(mins_dir + "/md" + str(i) + ".log")
root.reweight_minimized = True
root.lock()
root.save()
root.unlock()
# stores according minimum in nodes
min_per_node(mins_dir, presamp_partition, nodes, options.e_bonded, options.e_nonbonded, custom_energy_terms)
# calculate free energy per node
for i in xrange(len(nodes)):
log = open(nodes[i].reweighting_log_fn, "a") # using separate log-file
def output(message):
print(message)
log.write(message+"\n")
output("======= Starting node reweighting %s"%datetime.now())
phi_values = phi_mat[:,i]
phi_sum = np.sum(phi_values)
# calculate mean V
nodes[i].obs.mean_V = np.average(energies, weights=phi_values/phi_sum)
nodes[i].tmp['weight'] = 1.0
# number of presampling points in node i => free energy at high temperature
nodes[i].tmp['presamp_A']= -1/beta_presamp * np.log(phi_sum)
# compute free energy and entropy at sampling temperature
# n.obs.S = 0.0 #TODO can we get separate entropy from the term below?
beta_rel = beta_presamp / beta_samp
nodes[i].obs.A = (1 - beta_rel) * nodes[i].tmp["opt_pot_e"] + beta_rel * np.log(1/beta_rel) * (nodes[i].obs.mean_V - nodes[i].tmp["opt_pot_e"]) + beta_rel * nodes[i].tmp['presamp_A']
if('refpoints' in nodes[i].obs):
del nodes[i].obs['refpoints']
log.close()
nodes.sort(key = lambda n: n.obs.A) # sort in ascending order by free energy values
for (n1, n2) in zip(nodes[1:], nodes[:-1]): # calculate and normalize weights
n1.tmp['weight'] = np.exp(-nodes[0].pool.thermo_beta*( n1.obs.A - n2.obs.A )) * n2.tmp['weight']
#===============================================================================
def load_energy(node, e_bonded_type, e_nonbonded_type, custom_e_terms=None):
if(e_bonded_type != "none"):
# get bonded energy
if(e_bonded_type in ("run_standard_bondedterms", "run_standard_potential") ):
edr_fn = "ener.edr"
elif(e_bonded_type in ("rerun_standard_bondedterms", "rerun_standard_potential")):
edr_fn = "rerun.edr"
else:
raise(Exception("Method unkown: "+e_bonded_type))
if(e_bonded_type in ("run_standard_potential", "rerun_standard_potential")):
e_bonded_terms = ["Potential"]
if(e_bonded_type in ("run_standard_bondedterms", "rerun_standard_bondedterms")):
e_bonded_terms = ["Bond", "Angle", "Proper-Dih.", "Ryckaert-Bell.", "Improper-Dih."]
xvg_fn = mktemp(suffix=".xvg", dir=node.dir)
cmd = ["g_energy", "-dp", "-f", node.dir+"/"+edr_fn, "-o", xvg_fn, "-sum"]
print("Calling: "+(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE)
p.communicate(input=("\n".join(e_bonded_terms)+"\n"))
assert(p.wait() == 0)
# skipping over "#"-comments at the beginning of xvg-file
e_bonded = np.loadtxt(xvg_fn, comments="@", usecols=(1,), skiprows=10)
os.remove(xvg_fn)
# if len(energy file) != len(trajectory)
if(len(e_bonded)==node.trajectory.n_frames+1):
e_bonded = e_bonded[:-1]
else:
e_bonded = np.zeros(node.trajectory.n_frames)
if(e_nonbonded_type != "none"):
# get non-bonded energy
if(e_nonbonded_type in ("run_standard_nonbondedterms","run_moi_sol_interact_withLR","run_moi","run_moi_sol_sr","run_moi_sol_lr","run_custom")):
edr_fn = "ener.edr"
elif(e_nonbonded_type in ("rerun_standard_nonbondedterms","rerun_moi_sol_interact_withLR","rerun_moi","rerun_moi_sol_sr","rerun_moi_sol_lr","rerun_custom")):
edr_fn = "rerun.edr"
else:
raise(Exception("Method unkown: "+e_nonbonded_type))
if(e_nonbonded_type in ("run_standard_nonbondedterms", "rerun_standard_nonbondedterms")):
e_nonbonded_terms = ["LJ-14", "Coulomb-14", "LJ-(SR)", "LJ-(LR)", "Disper.-corr.", "Coulomb-(SR)", "Coul.-recip."]
if(e_nonbonded_type in ("run_moi", "rerun_moi")):
e_nonbonded_terms = ["Coul-SR:MOI-MOI", "LJ-SR:MOI-MOI", "LJ-LR:MOI-MOI", "Coul-14:MOI-MOI", "LJ-14:MOI-MOI"]
if(e_nonbonded_type in ("run_moi_sol_interact", "rerun_moi_sol_interact")):
e_nonbonded_terms = ["Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL"]
if(e_nonbonded_type in ("run_moi_sol_interact_withLR", "rerun_moi_sol_interact_withLR")):
#e_nonbonded_terms = ["Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL", "LJ-LR:MOI-SOL"]
e_nonbonded_terms = ["Coul-SR:SOL-UNK", "LJ-SR:SOL-UNK", "LJ-LR:SOL-UNK"]
if(e_nonbonded_type in ("run_moi_sol_sr", "rerun_moi_sol_sr")):
e_nonbonded_terms = ["Coul-SR:MOI-MOI", "LJ-SR:MOI-MOI", "LJ-LR:MOI-MOI", "Coul-14:MOI-MOI", "LJ-14:MOI-MOI", "Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL"]
if(e_nonbonded_type in ("run_moi_sol_lr", "rerun_moi_sol_lr")):
e_nonbonded_terms = ["Coul-SR:MOI-MOI", "LJ-SR:MOI-MOI", "LJ-LR:MOI-MOI", "Coul-14:MOI-MOI", "LJ-14:MOI-MOI", "Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL", "LJ-LR:MOI-SOL"]
if(e_nonbonded_type in ("run_custom", "rerun_custom")):
assert(custom_e_terms)
e_nonbonded_terms = custom_e_terms
xvg_fn = mktemp(suffix=".xvg", dir=node.dir)
cmd = ["g_energy", "-dp", "-f", node.dir+"/"+edr_fn, "-o", xvg_fn, "-sum"]
print("Calling: "+(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE)
p.communicate(input=("\n".join(e_nonbonded_terms)+"\n"))
assert(p.wait() == 0)
# skipping over "#"-comments at the beginning of xvg-file
e_nonbonded = np.loadtxt(xvg_fn, comments="@", usecols=(1,), skiprows=10)
os.remove(xvg_fn)
# if len(energy file) != len(trajectory)
if(len(e_nonbonded)==node.trajectory.n_frames+1):
e_nonbonded = e_nonbonded[:-1]
else:
e_nonbonded = np.zeros(node.trajectory.n_frames)
print "e_bonded: %d, e_nonbonded: %d, n_frames: %d" % (len(e_bonded), len(e_nonbonded), node.trajectory.n_frames)
assert(len(e_bonded) == len(e_nonbonded) == node.trajectory.n_frames)
return(e_bonded+e_nonbonded)
#===============================================================================
def check_restraint_energy(node):
""" Uses U{g_energy <http://www.gromacs.org/Documentation/Gromacs_Utilities/g_energy>}
to read the distance- and dihedral-restraint energies
used by gromacs for every frame of the node's trajectory and compares them
with our own values, which are calculated in L{ZIBMolPy.restraint}.
This is a safety measure to ensure that L{ZIBMolPy.restraint} is consistent with gromacs."""
#TODO: move next two lines into Node?
has_dih_restraints = any([isinstance(r, DihedralRestraint) for r in node.restraints])
has_dis_restraints = any([isinstance(r, DistanceRestraint) for r in node.restraints])
# Caution: g_energy ignores the given order of energy_terms and instead uses its own
energy_terms = []
if(has_dis_restraints):
#energy_terms += ["Dis.-Rest."]
energy_terms += ["Restraint-Pot."]
if(has_dih_restraints):
energy_terms += ["Dih.-Rest."]
#xvg_fn = mktemp(suffix=".xvg", dir=node.dir)
xvg_fn = mktemp(suffix=".xvg")
cmd = ["g_energy", "-dp", "-f", node.dir+"/ener.edr", "-o", xvg_fn]
print("Calling: "+(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE)
p.communicate(input=("\n".join(energy_terms)+"\n"))
assert(p.wait() == 0)
# skipping over "#"-comments at the beginning of xvg-file
energies = np.loadtxt(xvg_fn, comments="@", skiprows=10)
os.remove(xvg_fn)
assert(energies.shape[0] == node.trajectory.n_frames)
assert(energies.shape[1] == len(energy_terms)+1)
dih_penalty = np.zeros(node.trajectory.n_frames)
dis_penalty = np.zeros(node.trajectory.n_frames)
for i, r in enumerate(node.restraints):
p = r.energy(node.trajectory.getcoord(i))
if(isinstance(r, DihedralRestraint)):
dih_penalty += p
elif(isinstance(r, DistanceRestraint)):
dis_penalty += p
else:
warn("Unkown Restraint-type: "+str(r))
dih_penalty_gmx = np.zeros(node.trajectory.n_frames)
dis_penalty_gmx = np.zeros(node.trajectory.n_frames)
if(has_dih_restraints):
i = energy_terms.index("Dih.-Rest.") + 1 # index 0 = time of frame
dih_penalty_gmx = energies[:,i]
dih_diffs = np.abs(dih_penalty - dih_penalty_gmx)
max_diff = np.argmax(dih_diffs)
dih_diff = dih_diffs[max_diff]
bad_diff = max(0.006*energies[max_diff,i], 0.006)
print "dih_diff: ", dih_diff
assert(dih_diff < bad_diff) #TODO: is this reasonable? deviations tend to get bigger with absolute energy value, so I think yes
# TODO compare if we get the same dihedral angles from g_angle as we get from our internals
if(has_dis_restraints):
#i = energy_terms.index("Dis.-Rest.") + 1 # index 0 = time of frame
i = energy_terms.index("Restraint-Pot.") + 1 # index 0 = time of frame
dis_penalty_gmx = energies[:,i]
dis_diff = np.max(np.abs(dis_penalty - dis_penalty_gmx))
print "dis_diff: ", dis_diff
assert(dis_diff < 1e-3)
return( dih_penalty_gmx + dis_penalty_gmx ) # values are returned for optional plotting
def min_per_node(mins_dir, partition, nodes, e_bonded_type, e_nonbonded_type, custom_e_terms=None):
""" Calculates the minium energy of the presampling frames belonging to the according node. A frame belongs to the node to which it has the strongest membership.
@param mins_dir: directory in which the minimizations required for the presampling reweighting are performed
@param partition: 1D array, at index i is the index of the node corresponding to presampling frame i"""
mins = [[] for _ in xrange(len(nodes))] # independent lists; [[]]*n would alias a single list n times
presamp_int = nodes[0].pool.root.trajectory
for i in xrange(partition.size):
edr_fn = mins_dir + "/ener" + str(i) + ".edr"
e_terms = []
if(e_bonded_type != "none"):
# get bonded energy
if(e_bonded_type in ("run_standard_potential", "rerun_standard_potential")):
e_terms += ["Potential"]
elif(e_bonded_type in ("run_standard_bondedterms", "rerun_standard_bondedterms")):
e_terms += ["Bond", "Angle", "Proper-Dih.", "Ryckaert-Bell.", "Improper-Dih."]
else:
raise(Exception("Method unkown: "+e_bonded_type))
if(e_nonbonded_type != "none"):
# get non-bonded energy
if(e_nonbonded_type in ("run_standard_nonbondedterms", "rerun_standard_nonbondedterms")):
e_terms += ["LJ-14", "Coulomb-14", "LJ-(SR)", "LJ-(LR)", "Disper.-corr.", "Coulomb-(SR)", "Coul.-recip."]
elif(e_nonbonded_type in ("run_moi", "rerun_moi")):
e_terms += ["Coul-SR:MOI-MOI", "LJ-SR:MOI-MOI", "LJ-LR:MOI-MOI", "Coul-14:MOI-MOI", "LJ-14:MOI-MOI"]
elif(e_nonbonded_type in ("run_moi_sol_interact", "rerun_moi_sol_interact")):
e_terms += ["Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL"]
elif(e_nonbonded_type in ("run_moi_sol_interact_withLR", "rerun_moi_sol_interact_withLR")):
#e_nonbonded_terms = ["Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL", "LJ-LR:MOI-SOL"]
e_terms += ["Coul-SR:SOL-UNK", "LJ-SR:SOL-UNK", "LJ-LR:SOL-UNK"]
elif(e_nonbonded_type in ("run_moi_sol_sr", "rerun_moi_sol_sr")):
e_terms += ["Coul-SR:MOI-MOI", "LJ-SR:MOI-MOI", "LJ-LR:MOI-MOI", "Coul-14:MOI-MOI", "LJ-14:MOI-MOI", "Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL"]
elif(e_nonbonded_type in ("run_moi_sol_lr", "rerun_moi_sol_lr")):
e_terms += ["Coul-SR:MOI-MOI", "LJ-SR:MOI-MOI", "LJ-LR:MOI-MOI", "Coul-14:MOI-MOI", "LJ-14:MOI-MOI", "Coul-SR:MOI-SOL", "LJ-SR:MOI-SOL", "LJ-LR:MOI-SOL"]
elif(e_nonbonded_type in ("run_custom", "rerun_custom")):
assert(custom_e_terms)
e_terms += custom_e_terms
else:
raise(Exception("Method unkown: "+e_bonded_type))
if (len(e_terms) > 0):
xvg_fn = mktemp(suffix=".xvg", dir=mins_dir)
cmd = ["g_energy", "-dp", "-f", edr_fn, "-o", xvg_fn, "-sum"]
print("Calling: "+(" ".join(cmd)))
p = Popen(cmd, stdin=PIPE)
p.communicate(input=("\n".join(e_terms)+"\n"))
assert(p.wait() == 0)
# skipping over "#"-comments at the beginning of xvg-file
e = np.loadtxt(xvg_fn, comments="@", usecols=(1,), skiprows=10) [-1]
os.remove(xvg_fn)
else:
e = 0
mins[partition[i]].append(e + get_phi_potential(presamp_int.getframes([i]), nodes[partition[i]])[0])
for i in xrange(len(nodes)):
nodes[i].tmp["opt_pot_e"] = min(mins[i])
def get_phi_mat (ints, nodes):
"caclculates the membership of every frame to every node"
phi = np.empty((ints.n_frames, len(nodes)))
for j in xrange(len(nodes)):
phi[:,j] = get_phi(ints, nodes[j])
return phi
#===============================================================================
if(__name__ == "__main__"):
main()
#EOF
|
CMD-at-ZIB/ZIBMolPy
|
tools/zgf_reweight.py
|
Python
|
lgpl-3.0
| 31,388
|
[
"Avogadro",
"Gromacs"
] |
42f3b1ff84a0a09c68cf1f9bc56624f76eb69559b8c72e821ba6209b1e386703
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: getent
short_description: a wrapper to the unix getent utility
description:
- Runs getent against one of its various databases and returns information into
the host's facts, in a getent_<database> prefixed variable
version_added: "1.8"
options:
database:
required: True
description:
- the name of a getent database supported by the target system (passwd, group,
hosts, etc).
key:
required: False
default: ''
description:
- key from which to return values from the specified database, otherwise the
full contents are returned.
split:
required: False
default: None
description:
- "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database"
fail_key:
required: False
default: True
description:
- If a supplied key is missing this will make the task fail if True
notes:
- "Not all databases support enumeration, check system documentation for details"
requirements: [ ]
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# get root user info
- getent:
database: passwd
key: root
- debug:
var: getent_passwd
# get all groups
- getent:
database: group
split: ':'
- debug:
var: getent_group
# get all hosts (split defaults to whitespace)
- getent:
database: hosts
- debug:
var: getent_hosts
# get http service info, no error if missing
- getent:
database: services
key: http
fail_key: False
- debug:
var: getent_services
# get user password hash (requires sudo/root)
- getent:
database: shadow
key: www-data
split: ':'
- debug:
var: getent_shadow
'''
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def main():
module = AnsibleModule(
argument_spec = dict(
database = dict(required=True),
key = dict(required=False, default=None),
split = dict(required=False, default=None),
fail_key = dict(required=False, type='bool', default=True),
),
supports_check_mode = True,
)
colon = [ 'passwd', 'shadow', 'group', 'gshadow' ]
database = module.params['database']
key = module.params.get('key')
split = module.params.get('split')
fail_key = module.params.get('fail_key')
getent_bin = module.get_bin_path('getent', True)
if key is not None:
cmd = [ getent_bin, database, key ]
else:
cmd = [ getent_bin, database ]
if split is None and database in colon:
split = ':'
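# e.g. passwd records are colon-separated: root:x:0:0:root:/root:/bin/bash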
try:
rc, out, err = module.run_command(cmd)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
msg = "Unexpected failure!"
dbtree = 'getent_%s' % database
results = { dbtree: {} }
if rc == 0:
for line in out.splitlines():
record = line.split(split)
results[dbtree][record[0]] = record[1:]
module.exit_json(ansible_facts=results)
elif rc == 1:
msg = "Missing arguments, or database unknown."
elif rc == 2:
msg = "One or more supplied key could not be found in the database."
if not fail_key:
results[dbtree][key] = None
module.exit_json(ansible_facts=results, msg=msg)
elif rc == 3:
msg = "Enumeration not supported on this database."
module.fail_json(msg=msg)
if __name__ == '__main__':
main()
|
Rajeshkumar90/ansible-modules-extras
|
system/getent.py
|
Python
|
gpl-3.0
| 4,497
|
[
"Brian"
] |
612f0872f69bb99ef7c72328e0a54a889fcca70e650fd2c4c70a2ff174da68ff
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen.filters.rules/Person/_HasAssociation.py
# $Id$
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasAssociation
#
#-------------------------------------------------------------------------
class HasAssociation(Rule):
"""Rule that checks for a person with a personal association"""
labels = [ _('Number of instances:'), _('Number must be:')]
name = _('People with <count> associations')
description = _("Matches people with a certain number of associations")
category = _('General filters')
def prepare(self, db):
# things we want to do just once, not for every handle
if self.list[1] == 'lesser than':
self.count_type = 0
elif self.list[1] == 'greater than':
self.count_type = 2
else:
self.count_type = 1 # "equal to"
self.userSelectedCount = int(self.list[0])
def apply(self, db, person):
count = len(person.get_person_ref_list())
if self.count_type == 0: # "lesser than"
return count < self.userSelectedCount
elif self.count_type == 2: # "greater than"
return count > self.userSelectedCount
# "equal to"
return count == self.userSelectedCount
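# Illustrative usage sketch (hypothetical; the real filter framework wiring
# may differ): prepare() is called once, then apply() once per person:
#   rule = HasAssociation(['2', 'greater than'])
#   rule.prepare(db)
#   matches = [p for p in people if rule.apply(db, p)]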
|
Forage/Gramps
|
gramps/gen/filters/rules/person/_hasassociation.py
|
Python
|
gpl-2.0
| 2,624
|
[
"Brian"
] |
606c7a457394c8bcc40ab7a4158ef36e47298a40af7b4cc5f17ec89a9df235d6
|
from __future__ import division
import jedi.jedi as jedi
from jedi.utils import plot, init_tools
import matplotlib.pylab as plt
import numpy as np
# Setting Seeds
seeds = np.random.uniform(0,10000,1).astype(int)
# cosine-wave target (period 0.5)
target = lambda t0: np.cos(2 * np.pi * t0/.5)
#Simulation parameters for FORCE
dt = .01 # time step
tmax = 10 # simulation length
tstart = 0
tstop = 5 # learning stop time
rho = 1.25 # spectral radius of J
N = 300 # size of stochastic pool
lr = 1.0 # learning rate
pE = .8 # percent excitatory
sparsity = (.1,1,1) # sparsity
# Noise matrix
noise_mat = np.array([np.random.normal(0,.3,N) for i in range(int(tmax/dt+2))])
errors = []
zs = []
wus = []
for seedling in seeds:
J, Wz, _, x0, u, w = init_tools.set_simulation_parameters(seedling, N, 1, pE=pE, p=sparsity, rho=rho)
# inp & z are dummy variables
def model(t0, x, params):
index = params['index']
tanh_x = params['tanh_x']
z = params['z']
noise = params['noise'][index]
return (-x + np.dot(J, tanh_x) + Wz*z + noise)/dt
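# the model above implements leaky rate dynamics, x_dot = (-x + J*tanh(x)
# + Wz*z + noise)/dt; jedi.force presumably integrates it while adapting the
# readout w online (FORCE-style recursive least squares, Sussillo & Abbott 2009)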
x, t, z, _, wu,_ = jedi.force(target, model, lr, dt, tmax, tstart, tstop, x0, w, noise=noise_mat)
zs.append(z)
wus.append(wu)
error = np.abs(z-target(t))
errors.append(error)
errors = np.array(errors)
# Visualizing activities of first 10 neurons
T = 300
plt.figure(figsize=(12,4))
plt.subplot(211)
plt.title("Neuron Dynamics");
for i in range(10):
plt.plot(t[:T], x[:T, i]);
plt.subplot(212)
for i in range(10):
plt.plot(t[-T:], x[-T:, i]);
plt.xlim(t[-T], t[-1]);
plt.show()
## -- DFORCE -- ##
derrors = []
zs = []
wus = []
for seedling in seeds:
J, Wz, _, x0, u, w = init_tools.set_simulation_parameters(seedling, N, 1, pE=pE, p=sparsity, rho=rho)
def model(t0, x, params):
index = params['index']
tanh_x = params['tanh_x']
z = params['z']
noise = params['noise'][index]
return (-x + np.dot(J, tanh_x) + Wz*z + noise)/dt
x, t, z, _, wu,_ = jedi.dforce(jedi.step_decode, target, model, lr, dt, tmax, tstart, tstop, x0, w,
noise=noise_mat, pE=pE)
zs.append(z)
wus.append(wu)
derror = np.abs(z-target(t))
derrors.append(derror)
derrors = np.array(derrors)
# Visualizing activities of first 10 neurons
T = 300
plt.figure(figsize=(12,4))
plt.subplot(211)
plt.title("Neuron Dynamics");
for i in range(10):
plt.plot(t[:T], x[:T, i]);
plt.subplot(212)
for i in range(10):
plt.plot(t[-T:], x[-T:, i]);
plt.xlim(t[-T], t[-1]);
plt.show()
plt.figure(figsize=(12,4))
plot.cross_signal_error(errors, derrors, t, tstart, tstop,
title="FORCE vs SFORCE (Sin Wave))", burn_in=100)
plt.show()
|
avicennax/jedi
|
tests/sin_test.py
|
Python
|
mit
| 2,751
|
[
"NEURON"
] |
0c60d3a78d0c1bca40db0ffb55f91713c5600171d35d25d99dd3063fac4a8f66
|
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing WRF, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Andreas Hilboll (University of Bremen)
"""
import os
import re
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.netcdf import set_netcdf_env_vars # @UnresolvedImport
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.filetools import apply_regex_substitutions, change_dir, patch_perl_script_autoflush, read_file
from easybuild.tools.filetools import remove_file, symlink
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd, run_cmd_qa
def det_wrf_subdir(wrf_version):
"""Determine WRF subdirectory for given WRF version."""
if LooseVersion(wrf_version) < LooseVersion('4.0'):
wrf_subdir = 'WRFV%s' % wrf_version.split('.')[0]
else:
wrf_subdir = 'WRF-%s' % wrf_version
return wrf_subdir
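# for example: det_wrf_subdir('3.8.1') -> 'WRFV3',
#              det_wrf_subdir('4.1.2') -> 'WRF-4.1.2'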
class EB_WRF(EasyBlock):
"""Support for building/installing WRF."""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to WRF."""
super(EB_WRF, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.comp_fam = None
self.wrfsubdir = det_wrf_subdir(self.version)
@staticmethod
def extra_options():
extra_vars = {
'buildtype': [None, "Specify the type of build (serial, smpar (OpenMP), "
"dmpar (MPI), dm+sm (hybrid OpenMP/MPI)).", MANDATORY],
'rewriteopts': [True, "Replace -O3 with CFLAGS/FFLAGS", CUSTOM],
'runtest': [True, "Build and run WRF tests", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure build:
- set some magic environment variables
- run configure script
- adjust configure.wrf file if needed
"""
wrfdir = os.path.join(self.builddir, self.wrfsubdir)
# define $NETCDF* for netCDF dependency (used when creating WRF module file)
set_netcdf_env_vars(self.log)
# HDF5 (optional) dependency
hdf5 = get_software_root('HDF5')
if hdf5:
env.setvar('HDF5', hdf5)
# check if this is parallel HDF5
phdf5_bins = ['h5pcc', 'ph5diff']
parallel_hdf5 = True
for f in phdf5_bins:
if not os.path.exists(os.path.join(hdf5, 'bin', f)):
parallel_hdf5 = False
break
if parallel_hdf5:
env.setvar('PHDF5', hdf5)
else:
self.log.info("Parallel HDF5 module not loaded, assuming that's OK...")
else:
self.log.info("HDF5 module not loaded, assuming that's OK...")
# Parallel netCDF (optional) dependency
pnetcdf = get_software_root('PnetCDF')
if pnetcdf:
env.setvar('PNETCDF', pnetcdf)
# JasPer dependency check + setting env vars
jasper = get_software_root('JasPer')
if jasper:
jasperlibdir = os.path.join(jasper, "lib")
env.setvar('JASPERINC', os.path.join(jasper, "include"))
env.setvar('JASPERLIB', jasperlibdir)
else:
if os.getenv('JASPERINC') or os.getenv('JASPERLIB'):
raise EasyBuildError("JasPer module not loaded, but JASPERINC and/or JASPERLIB still set?")
else:
self.log.info("JasPer module not loaded, assuming that's OK...")
# enable support for large file support in netCDF
env.setvar('WRFIO_NCD_LARGE_FILE_SUPPORT', '1')
# patch arch/Config_new.pl script, so that run_cmd_qa receives all output to answer questions
if LooseVersion(self.version) < LooseVersion('4.0'):
patch_perl_script_autoflush(os.path.join(wrfdir, "arch", "Config_new.pl"))
# determine build type option to look for
build_type_option = None
self.comp_fam = self.toolchain.comp_family()
if self.comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
if LooseVersion(self.version) >= LooseVersion('3.7'):
build_type_option = "INTEL\ \(ifort\/icc\)"
else:
build_type_option = "Linux x86_64 i486 i586 i686, ifort compiler with icc"
elif self.comp_fam == toolchain.GCC: # @UndefinedVariable
if LooseVersion(self.version) >= LooseVersion('3.7'):
build_type_option = "GNU\ \(gfortran\/gcc\)"
else:
build_type_option = "x86_64 Linux, gfortran compiler with gcc"
else:
raise EasyBuildError("Don't know how to figure out build type to select.")
# fetch selected build type (and make sure it makes sense)
known_build_types = ['serial', 'smpar', 'dmpar', 'dm+sm']
self.parallel_build_types = ["dmpar", "dm+sm"]
bt = self.cfg['buildtype']
if bt not in known_build_types:
raise EasyBuildError("Unknown build type: '%s'. Supported build types: %s", bt, known_build_types)
# Escape the "+" in "dm+sm" since it's being used in a regexp below.
bt = bt.replace('+', r'\+')
# fetch option number based on build type option and selected build type
if LooseVersion(self.version) >= LooseVersion('3.7'):
# the two relevant lines in the configure output for WRF 3.8 are:
# 13. (serial) 14. (smpar) 15. (dmpar) 16. (dm+sm) INTEL (ifort/icc)
# 32. (serial) 33. (smpar) 34. (dmpar) 35. (dm+sm) GNU (gfortran/gcc)
build_type_question = "\s*(?P<nr>[0-9]+)\.\ \(%s\).*%s" % (bt, build_type_option)
else:
# the relevant lines in the configure output for WRF 3.6 are:
# 13. Linux x86_64 i486 i586 i686, ifort compiler with icc (serial)
# 14. Linux x86_64 i486 i586 i686, ifort compiler with icc (smpar)
# 15. Linux x86_64 i486 i586 i686, ifort compiler with icc (dmpar)
# 16. Linux x86_64 i486 i586 i686, ifort compiler with icc (dm+sm)
# 32. x86_64 Linux, gfortran compiler with gcc (serial)
# 33. x86_64 Linux, gfortran compiler with gcc (smpar)
# 34. x86_64 Linux, gfortran compiler with gcc (dmpar)
# 35. x86_64 Linux, gfortran compiler with gcc (dm+sm)
build_type_question = "\s*(?P<nr>[0-9]+).\s*%s\s*\(%s\)" % (build_type_option, bt)
# run configure script
cmd = "./configure"
qa = {
# named group in match will be used to construct answer
"Compile for nesting? (1=basic, 2=preset moves, 3=vortex following) [default 1]:": "1",
"Compile for nesting? (0=no nesting, 1=basic, 2=preset moves, 3=vortex following) [default 0]:": "0"
}
no_qa = [
"testing for fseeko and fseeko64",
r"If you wish to change the default options, edit the file:[\s\n]*arch/configure_new.defaults"
]
std_qa = {
# named group in match will be used to construct answer
r"%s.*\n(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: "%(nr)s",
}
run_cmd_qa(cmd, qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)
cfgfile = 'configure.wrf'
# make sure correct compilers are being used
comps = {
'SCC': os.getenv('CC'),
'SFC': os.getenv('F90'),
'CCOMP': os.getenv('CC'),
'DM_FC': os.getenv('MPIF90'),
'DM_CC': "%s -DMPI2_SUPPORT" % os.getenv('MPICC'),
}
regex_subs = [(r"^(%s\s*=\s*).*$" % k, r"\1 %s" % v) for (k, v) in comps.items()]
apply_regex_substitutions(cfgfile, regex_subs)
# rewrite optimization options if desired
if self.cfg['rewriteopts']:
# replace default -O3 option in configure.wrf with CFLAGS/FFLAGS from environment
self.log.info("Rewriting optimization options in %s" % cfgfile)
# set extra flags for Intel compilers
# see http://software.intel.com/en-us/forums/showthread.php?t=72109&p=1#146748
if self.comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
# -O3 -heap-arrays is required to resolve compilation error
for envvar in ['CFLAGS', 'FFLAGS']:
val = os.getenv(envvar)
if '-O3' in val:
env.setvar(envvar, '%s -heap-arrays' % val)
self.log.info("Updated %s to '%s'" % (envvar, os.getenv(envvar)))
# replace -O3 with desired optimization options
regex_subs = [
(r"^(FCOPTIM.*)(\s-O3)(\s.*)$", r"\1 %s \3" % os.getenv('FFLAGS')),
(r"^(CFLAGS_LOCAL.*)(\s-O3)(\s.*)$", r"\1 %s \3" % os.getenv('CFLAGS')),
]
apply_regex_substitutions(cfgfile, regex_subs)
def build_step(self):
"""Build and install WRF and testcases using provided compile script."""
# enable parallel build
par = self.cfg['parallel']
self.par = ''
if par:
self.par = "-j %s" % par
# build wrf (compile script uses /bin/csh )
cmd = "tcsh ./compile %s wrf" % self.par
run_cmd(cmd, log_all=True, simple=True, log_output=True)
# build two testcases to produce ideal.exe and real.exe
for test in ["em_real", "em_b_wave"]:
cmd = "tcsh ./compile %s %s" % (self.par, test)
run_cmd(cmd, log_all=True, simple=True, log_output=True)
def test_step(self):
"""Build and run tests included in the WRF distribution."""
if self.cfg['runtest']:
if self.cfg['buildtype'] in self.parallel_build_types and not build_option('mpi_tests'):
self.log.info("Skipping testing of WRF with build type '%s' since MPI testing is disabled",
self.cfg['buildtype'])
return
# get list of WRF test cases
self.testcases = []
if os.path.exists('test'):
self.testcases = os.listdir('test')
elif not self.dry_run:
raise EasyBuildError("Test directory not found, failed to determine list of test cases")
# exclude 2d testcases in parallel WRF builds
if self.cfg['buildtype'] in self.parallel_build_types:
self.testcases = [test for test in self.testcases if '2d_' not in test]
# exclude real testcases
self.testcases = [test for test in self.testcases if not test.endswith("_real")]
self.log.debug("intermediate list of testcases: %s" % self.testcases)
# exclude tests that should not be run
for test in ["em_esmf_exp", "em_scm_xy", "nmm_tropical_cyclone"]:
if test in self.testcases:
self.testcases.remove(test)
# some tests hang when WRF is built with Intel compilers
if self.comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
for test in ["em_heldsuarez"]:
if test in self.testcases:
self.testcases.remove(test)
# determine number of MPI ranks to use in tests (1/2 of available processors + 1);
# we need to limit max number of MPI ranks (8 is too high for some tests, 4 is OK),
# since otherwise run may fail because domain size is too small
n_mpi_ranks = min(self.cfg['parallel'] / 2 + 1, 4)
# prepare run command
# stack limit needs to be set to unlimited for WRF to work well
if self.cfg['buildtype'] in self.parallel_build_types:
test_cmd = "ulimit -s unlimited && %s && %s" % (self.toolchain.mpi_cmd_for("./ideal.exe", 1),
self.toolchain.mpi_cmd_for("./wrf.exe", n_mpi_ranks))
else:
test_cmd = "ulimit -s unlimited && ./ideal.exe && ./wrf.exe >rsl.error.0000 2>&1"
# regex to check for successful test run
re_success = re.compile("SUCCESS COMPLETE WRF")
def run_test():
"""Run a single test and check for success."""
# run test
(_, ec) = run_cmd(test_cmd, log_all=False, log_ok=False, simple=False)
# read output file
out_fn = 'rsl.error.0000'
if os.path.exists(out_fn):
out_txt = read_file(out_fn)
else:
out_txt = 'FILE NOT FOUND'
if ec == 0:
# exit code zero suggests success, but let's make sure...
if re_success.search(out_txt):
self.log.info("Test %s ran successfully (found '%s' in %s)", test, re_success.pattern, out_fn)
else:
raise EasyBuildError("Test %s failed, pattern '%s' not found in %s: %s",
test, re_success.pattern, out_fn, out_txt)
else:
# non-zero exit code means trouble, show command output
raise EasyBuildError("Test %s failed with exit code %s, output: %s", test, ec, out_txt)
# clean up stuff that gets in the way
fn_prefs = ["wrfinput_", "namelist.output", "wrfout_", "rsl.out.", "rsl.error."]
for filename in os.listdir('.'):
for pref in fn_prefs:
if filename.startswith(pref):
remove_file(filename)
self.log.debug("Cleaned up file %s", filename)
# build and run each test case individually
for test in self.testcases:
self.log.debug("Building and running test %s" % test)
# build and install
cmd = "tcsh ./compile %s %s" % (self.par, test)
run_cmd(cmd, log_all=True, simple=True)
# run test
try:
prev_dir = change_dir('run')
if test in ["em_fire"]:
# handle tests with subtests separately
testdir = os.path.join("..", "test", test)
for subtest in [x for x in os.listdir(testdir) if os.path.isdir(x)]:
subtestdir = os.path.join(testdir, subtest)
# link required files
for filename in os.listdir(subtestdir):
if os.path.exists(filename):
remove_file(filename)
symlink(os.path.join(subtestdir, filename), filename)
# run test
run_test()
else:
# run test
run_test()
change_dir(prev_dir)
except OSError as err:
raise EasyBuildError("An error occured when running test %s: %s", test, err)
# building/installing is done in build_step, so we can run tests
def install_step(self):
"""Building was done in install dir, so nothing to do in install_step."""
pass
def sanity_check_step(self):
"""Custom sanity check for WRF."""
files = ['libwrflib.a', 'wrf.exe', 'ideal.exe', 'real.exe', 'ndown.exe', 'tc.exe']
# nup.exe was 'temporarily removed' in WRF v3.7, at least until 3.8
if LooseVersion(self.version) < LooseVersion('3.7'):
files.append('nup.exe')
custom_paths = {
'files': [os.path.join(self.wrfsubdir, 'main', f) for f in files],
'dirs': [os.path.join(self.wrfsubdir, d) for d in ['main', 'run']],
}
super(EB_WRF, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Path-like environment variable updates specific to WRF."""
maindir = os.path.join(self.wrfsubdir, 'main')
return {
'PATH': [maindir],
'LD_LIBRARY_PATH': [maindir],
'MANPATH': [],
}
def make_module_extra(self):
"""Add netCDF environment variables to module file."""
txt = super(EB_WRF, self).make_module_extra()
for netcdf_var in ['NETCDF', 'NETCDFF']:
if os.getenv(netcdf_var) is not None:
txt += self.module_generator.set_environment(netcdf_var, os.getenv(netcdf_var))
return txt
|
pescobar/easybuild-easyblocks
|
easybuild/easyblocks/w/wrf.py
|
Python
|
gpl-2.0
| 18,256
|
[
"NetCDF"
] |
7e326e830900f681dd196e20c46788d80c4a979ac846b4bf22f6ff471784da96
|
"""
Spectral Generation
===================
Defines various objects performing spectral generation:
- :func:`colour.sd_constant`
- :func:`colour.sd_zeros`
- :func:`colour.sd_ones`
- :func:`colour.msds_constant`
- :func:`colour.msds_zeros`
- :func:`colour.msds_ones`
- :func:`colour.colorimetry.sd_gaussian_normal`
- :func:`colour.colorimetry.sd_gaussian_fwhm`
- :attr:`colour.SD_GAUSSIAN_METHODS`
- :func:`colour.sd_gaussian`
- :func:`colour.colorimetry.sd_single_led_Ohno2005`
- :attr:`colour.SD_SINGLE_LED_METHODS`
- :func:`colour.sd_single_led`
- :func:`colour.colorimetry.sd_multi_leds_Ohno2005`
- :attr:`colour.SD_MULTI_LEDS_METHODS`
- :func:`colour.sd_multi_leds`
References
----------
- :cite:`Ohno2005` : Ohno, Yoshi. (2005). Spectral design considerations for
white LED color rendering. Optical Engineering, 44(11), 111302.
doi:10.1117/1.2130694
- :cite:`Ohno2008a` : Ohno, Yoshiro, & Davis, W. (2008). NIST CQS simulation
(Version 7.4) [Computer software].
https://drive.google.com/file/d/1PsuU6QjUJjCX6tQyCud6ul2Tbs8rYWW9/view?\
usp=sharing
"""
from __future__ import annotations
import numpy as np
from colour.colorimetry import (
SPECTRAL_SHAPE_DEFAULT,
MultiSpectralDistributions,
SpectralDistribution,
SpectralShape,
)
from colour.hints import (
Any,
ArrayLike,
Floating,
Literal,
NDArray,
Optional,
Sequence,
Union,
)
from colour.utilities import (
CaseInsensitiveMapping,
as_float_array,
full,
ones,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"sd_constant",
"sd_zeros",
"sd_ones",
"msds_constant",
"msds_zeros",
"msds_ones",
"sd_gaussian_normal",
"sd_gaussian_fwhm",
"SD_GAUSSIAN_METHODS",
"sd_gaussian",
"sd_single_led_Ohno2005",
"SD_SINGLE_LED_METHODS",
"sd_single_led",
"sd_multi_leds_Ohno2005",
"SD_MULTI_LEDS_METHODS",
"sd_multi_leds",
]
def sd_constant(
k: Floating, shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT, **kwargs: Any
) -> SpectralDistribution:
"""
Return a spectral distribution of given spectral shape filled with
constant :math:`k` values.
Parameters
----------
k
Constant :math:`k` to fill the spectral distribution with.
shape
Spectral shape used to create the spectral distribution.
Other Parameters
----------------
kwargs
{:class:`colour.SpectralDistribution`},
See the documentation of the previously listed class.
Returns
-------
:class:`colour.SpectralDistribution`
Constant :math:`k` filled spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> sd = sd_constant(100)
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[400]
100.0
"""
settings = {"name": f"{k} Constant"}
settings.update(kwargs)
wavelengths = shape.range()
values = full(len(wavelengths), k)
return SpectralDistribution(values, wavelengths, **settings)
def sd_zeros(
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT, **kwargs: Any
) -> SpectralDistribution:
"""
Return a spectral distribution of given spectral shape filled with zeros.
Parameters
----------
shape
Spectral shape used to create the spectral distribution.
Other Parameters
----------------
kwargs
{:func:`colour.sd_constant`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.SpectralDistribution`
Zeros filled spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> sd = sd_zeros()
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[400]
0.0
"""
return sd_constant(0, shape, **kwargs)
def sd_ones(
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT, **kwargs: Any
) -> SpectralDistribution:
"""
Return a spectral distribution of given spectral shape filled with ones.
Parameters
----------
shape
Spectral shape used to create the spectral distribution.
Other Parameters
----------------
kwargs
{:func:`colour.sd_constant`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.SpectralDistribution`
Ones filled spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> sd = sd_ones()
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[400]
1.0
"""
return sd_constant(1, shape, **kwargs)
def msds_constant(
k: Floating,
labels: Sequence,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
**kwargs: Any,
) -> MultiSpectralDistributions:
"""
Return the multi-spectral distributions with given labels and given
spectral shape filled with constant :math:`k` values.
Parameters
----------
k
Constant :math:`k` to fill the multi-spectral distributions with.
labels
Names to use for the :class:`colour.SpectralDistribution` class
instances.
shape
Spectral shape used to create the multi-spectral distributions.
Other Parameters
----------------
kwargs
{:class:`colour.MultiSpectralDistributions`},
See the documentation of the previously listed class.
Returns
-------
:class:`colour.MultiSpectralDistributions`
Constant :math:`k` filled multi-spectral distributions.
Notes
-----
- By default, the multi-spectral distributions will use the shape given
by :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> msds = msds_constant(100, labels=['a', 'b', 'c'])
>>> msds.shape
SpectralShape(360.0, 780.0, 1.0)
>>> msds[400]
array([ 100., 100., 100.])
>>> msds.labels # doctest: +SKIP
['a', 'b', 'c']
"""
settings = {"name": f"{k} Constant"}
settings.update(kwargs)
wavelengths = shape.range()
values = full((len(wavelengths), len(labels)), k)
return MultiSpectralDistributions(
values, wavelengths, labels=labels, **settings
)
def msds_zeros(
labels: Sequence,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
**kwargs: Any,
) -> MultiSpectralDistributions:
"""
Return the multi-spectral distributions with given labels and given
spectral shape filled with zeros.
Parameters
----------
labels
Names to use for the :class:`colour.SpectralDistribution` class
instances.
shape
Spectral shape used to create the multi-spectral distributions.
Other Parameters
----------------
kwargs
{:func:`colour.msds_constant`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.MultiSpectralDistributions`
Zeros filled multi-spectral distributions.
Notes
-----
- By default, the multi-spectral distributions will use the shape given
by :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> msds = msds_zeros(labels=['a', 'b', 'c'])
>>> msds.shape
SpectralShape(360.0, 780.0, 1.0)
>>> msds[400]
array([ 0., 0., 0.])
>>> msds.labels # doctest: +SKIP
['a', 'b', 'c']
"""
return msds_constant(0, labels, shape, **kwargs)
def msds_ones(
labels: Sequence,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
**kwargs: Any,
) -> MultiSpectralDistributions:
"""
Return the multi-spectral distributions with given labels and given
spectral shape filled with ones.
Parameters
----------
labels
Names to use for the :class:`colour.SpectralDistribution` class
instances.
shape
Spectral shape used to create the multi-spectral distributions.
Other Parameters
----------------
kwargs
{:func:`colour.msds_constant`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.MultiSpectralDistributions`
Ones filled multi-spectral distributions.
Notes
-----
- By default, the multi-spectral distributions will use the shape given
by :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> msds = msds_ones(labels=['a', 'b', 'c'])
>>> msds.shape
SpectralShape(360.0, 780.0, 1.0)
>>> msds[400]
array([ 1., 1., 1.])
>>> msds.labels # doctest: +SKIP
['a', 'b', 'c']
"""
return msds_constant(1, labels, shape, **kwargs)
def sd_gaussian_normal(
mu: Floating,
sigma: Floating,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
**kwargs: Any,
) -> SpectralDistribution:
"""
Return a gaussian spectral distribution of given spectral shape at
given mean wavelength :math:`\\mu` and standard deviation :math:`\\sigma`.
Parameters
----------
mu
Mean wavelength :math:`\\mu` the gaussian spectral distribution will
peak at.
sigma
Standard deviation :math:`\\sigma` of the gaussian spectral distribution.
shape
Spectral shape used to create the spectral distribution.
Other Parameters
----------------
kwargs
{:class:`colour.SpectralDistribution`},
See the documentation of the previously listed class.
Returns
-------
:class:`colour.SpectralDistribution`
Gaussian spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> sd = sd_gaussian_normal(555, 25)
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[555] # doctest: +ELLIPSIS
1.0000000...
>>> sd[530] # doctest: +ELLIPSIS
0.6065306...
"""
settings = {"name": f"{mu}nm - {sigma} Sigma - Gaussian"}
settings.update(kwargs)
wavelengths = shape.range()
values = np.exp(-((wavelengths - mu) ** 2) / (2 * sigma**2))
return SpectralDistribution(values, wavelengths, **settings)
def sd_gaussian_fwhm(
peak_wavelength: Floating,
fwhm: Floating,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
**kwargs: Any,
) -> SpectralDistribution:
"""
Return a gaussian spectral distribution of given spectral shape at given
peak wavelength and full width at half maximum.
Parameters
----------
peak_wavelength
Wavelength the gaussian spectral distribution will peak at.
fwhm
Full width at half maximum, i.e. width of the gaussian spectral
distribution measured between those points on the *y* axis which are
half the maximum amplitude.
shape
Spectral shape used to create the spectral distribution.
Other Parameters
----------------
kwargs
{:class:`colour.SpectralDistribution`},
See the documentation of the previously listed class.
Returns
-------
:class:`colour.SpectralDistribution`
Gaussian spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> sd = sd_gaussian_fwhm(555, 25)
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[555]
1.0
>>> sd[530] # doctest: +ELLIPSIS
0.3678794...
"""
settings = {"name": f"{peak_wavelength}nm - {fwhm} FWHM - Gaussian"}
settings.update(kwargs)
wavelengths = shape.range()
values = np.exp(-(((wavelengths - peak_wavelength) / fwhm) ** 2))
return SpectralDistribution(values, wavelengths, **settings)
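# Editorial sketch (not part of the original module): the two gaussian
# parameterisations above differ only in how the exponent is scaled. A minimal
# numpy check, restating the exact formulas from :func:`sd_gaussian_normal`
# and :func:`sd_gaussian_fwhm`:
def _compare_gaussian_parameterisations() -> None:
    """Illustration only: evaluate both gaussian formulas 25nm off-peak."""
    wavelengths = np.arange(360.0, 781.0, 1.0)
    normal = np.exp(-((wavelengths - 555.0) ** 2) / (2 * 25.0**2))  # sigma=25
    fwhm = np.exp(-(((wavelengths - 555.0) / 25.0) ** 2))  # fwhm parameter=25
    # Both peak at 1.0 at 555nm but decay at different rates at 530nm:
    print(normal[wavelengths == 530.0], fwhm[wavelengths == 530.0])  # ~0.607, ~0.368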
SD_GAUSSIAN_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
{"Normal": sd_gaussian_normal, "FWHM": sd_gaussian_fwhm}
)
SD_GAUSSIAN_METHODS.__doc__ = """
Supported gaussian spectral distribution computation methods.
"""
def sd_gaussian(
mu_peak_wavelength: Floating,
sigma_fwhm: Floating,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
method: Union[Literal["Normal", "FWHM"], str] = "Normal",
**kwargs: Any,
) -> SpectralDistribution:
"""
Return a gaussian spectral distribution of given spectral shape using
given method.
Parameters
----------
mu_peak_wavelength
Mean wavelength :math:`\\mu` the gaussian spectral distribution will
peak at.
sigma_fwhm
Standard deviation :math:`\\sigma` of the gaussian spectral distribution,
or full width at half maximum, i.e. width of the gaussian spectral
distribution measured between those points on the *y* axis which are
half the maximum amplitude.
shape
Spectral shape used to create the spectral distribution.
method
Computation method.
Other Parameters
----------------
kwargs
{:func:`colour.colorimetry.sd_gaussian_normal`,
:func:`colour.colorimetry.sd_gaussian_fwhm`},
See the documentation of the previously listed definitions.
Returns
-------
:class:`colour.SpectralDistribution`
Gaussian spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
Examples
--------
>>> sd = sd_gaussian(555, 25)
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[555] # doctest: +ELLIPSIS
1.0000000...
>>> sd[530] # doctest: +ELLIPSIS
0.6065306...
>>> sd = sd_gaussian(555, 25, method='FWHM')
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[555]
1.0
>>> sd[530] # doctest: +ELLIPSIS
0.3678794...
"""
method = validate_method(method, SD_GAUSSIAN_METHODS)
return SD_GAUSSIAN_METHODS[method](
mu_peak_wavelength, sigma_fwhm, shape, **kwargs
)
def sd_single_led_Ohno2005(
peak_wavelength: Floating,
fwhm: Floating,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
**kwargs: Any,
) -> SpectralDistribution:
"""
Return a single *LED* spectral distribution of given spectral shape at
given peak wavelength and full width at half maximum according to
*Ohno (2005)* method.
Parameters
----------
peak_wavelength
Wavelength the single *LED* spectral distribution will peak at.
fwhm
Full width at half maximum, i.e. width of the underlying gaussian
spectral distribution measured between those points on the *y* axis
which are half the maximum amplitude.
shape
Spectral shape used to create the spectral distribution.
Other Parameters
----------------
kwargs
{:func:`colour.colorimetry.sd_gaussian_fwhm`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.SpectralDistribution`
Single *LED* spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
References
----------
:cite:`Ohno2005`, :cite:`Ohno2008a`
Examples
--------
>>> sd = sd_single_led_Ohno2005(555, 25)
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[555] # doctest: +ELLIPSIS
1.0000000...
"""
settings = {"name": f"{peak_wavelength}nm - {fwhm} FWHM LED - Ohno (2005)"}
settings.update(kwargs)
sd = sd_gaussian_fwhm(peak_wavelength, fwhm, shape, **settings)
sd.values = (sd.values + 2 * sd.values**5) / 3
return sd
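# Editorial note: the *Ohno (2005)* single-LED model above reshapes a unit
# gaussian ``g`` into ``(g + 2 * g**5) / 3``, which keeps the peak at 1.0
# while pulling the flanks down relative to the plain gaussian. As a
# standalone sketch (illustration only):
def _ohno2005_led_shape(g: NDArray) -> NDArray:
    """Apply the Ohno (2005) LED spectral shaping to gaussian values ``g``."""
    return (g + 2 * g**5) / 3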
SD_SINGLE_LED_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
{
"Ohno 2005": sd_single_led_Ohno2005,
}
)
SD_SINGLE_LED_METHODS.__doc__ = """
Supported single *LED* spectral distribution computation methods.
"""
def sd_single_led(
peak_wavelength: Floating,
fwhm: Floating,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
method: Union[Literal["Ohno 2005"], str] = "Ohno 2005",
**kwargs: Any,
) -> SpectralDistribution:
"""
Return a single *LED* spectral distribution of given spectral shape at
given peak wavelength and full width at half maximum according to given
method.
Parameters
----------
peak_wavelength
Wavelength the single *LED* spectral distribution will peak at.
fwhm
Full width at half maximum, i.e. width of the underlying gaussian
spectral distribution measured between those points on the *y*
axis which are half the maximum amplitude.
shape
Spectral shape used to create the spectral distribution.
method
Computation method.
Other Parameters
----------------
kwargs
{:func:`colour.colorimetry.sd_single_led_Ohno2005`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.SpectralDistribution`
Single *LED* spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
References
----------
:cite:`Ohno2005`, :cite:`Ohno2008a`
Examples
--------
>>> sd = sd_single_led(555, 25)
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[555] # doctest: +ELLIPSIS
1.0000000...
"""
method = validate_method(method, SD_SINGLE_LED_METHODS)
return SD_SINGLE_LED_METHODS[method](
peak_wavelength, fwhm, shape, **kwargs
)
def sd_multi_leds_Ohno2005(
peak_wavelengths: ArrayLike,
fwhm: ArrayLike,
peak_power_ratios: Optional[ArrayLike] = None,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
**kwargs: Any,
) -> SpectralDistribution:
"""
Return a multi *LED* spectral distribution of given spectral shape at
given peak wavelengths and full widths at half maximum according to
*Ohno (2005)* method.
The multi *LED* spectral distribution is generated by summing multiple single
*LED* spectral distributions created with the
:func:`colour.colorimetry.sd_single_led_Ohno2005` definition.
Parameters
----------
peak_wavelengths
Wavelengths the multi *LED* spectral distribution will peak at, i.e.
the peak of each generated single *LED* spectral distribution.
fwhm
Full widths at half maximum, i.e. widths of the underlying gaussian
spectral distributions measured between those points on the *y* axis
which are half the maximum amplitude.
peak_power_ratios
Peak power ratio of each generated single *LED* spectral
distribution.
shape
Spectral shape used to create the spectral distribution.
Other Parameters
----------------
kwargs
{:func:`colour.colorimetry.sd_single_led_Ohno2005`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.SpectralDistribution`
Multi *LED* spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
References
----------
:cite:`Ohno2005`, :cite:`Ohno2008a`
Examples
--------
>>> sd = sd_multi_leds_Ohno2005(
... np.array([457, 530, 615]),
... np.array([20, 30, 20]),
... np.array([0.731, 1.000, 1.660]),
... )
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[500] # doctest: +ELLIPSIS
0.1295132...
"""
peak_wavelengths = as_float_array(peak_wavelengths)
fwhm = np.resize(fwhm, peak_wavelengths.shape)
if peak_power_ratios is None:
peak_power_ratios = ones(peak_wavelengths.shape)
else:
peak_power_ratios = np.resize(
peak_power_ratios, peak_wavelengths.shape
)
sd = sd_zeros(shape)
for (peak_wavelength, fwhm_s, peak_power_ratio) in zip(
peak_wavelengths, fwhm, peak_power_ratios
):
sd += ( # type: ignore[misc]
sd_single_led_Ohno2005(peak_wavelength, fwhm_s, **kwargs)
* peak_power_ratio
)
def _format_array(a: NDArray) -> str:
"""Format given array :math:`a`."""
return ", ".join([str(e) for e in a])
sd.name = (
f"{_format_array(peak_wavelengths)}nm - "
f"{_format_array(fwhm)}FWHM - "
f"{_format_array(peak_power_ratios)} Peak Power Ratios - "
f"LED - Ohno (2005)"
)
return sd
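# Editorial note: the loop above is a weighted superposition; in plain numpy
# terms (illustration only), the result is equivalent to
#
#     sum(ratio * single_led(peak, width) for peak, width, ratio
#         in zip(peak_wavelengths, fwhm, peak_power_ratios))
#
# with each addend evaluated on the same spectral shape.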
SD_MULTI_LEDS_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
{
"Ohno 2005": sd_multi_leds_Ohno2005,
}
)
SD_MULTI_LEDS_METHODS.__doc__ = """
Supported multi *LED* spectral distribution computation methods.
"""
def sd_multi_leds(
peak_wavelengths: ArrayLike,
fwhm: ArrayLike,
peak_power_ratios: Optional[ArrayLike] = None,
shape: SpectralShape = SPECTRAL_SHAPE_DEFAULT,
method: Union[Literal["Ohno 2005"], str] = "Ohno 2005",
**kwargs: Any,
) -> SpectralDistribution:
"""
Return a multi *LED* spectral distribution of given spectral shape at
given peak wavelengths and full widths at half maximum according to given
method.
Parameters
----------
peak_wavelengths
Wavelengths the multi *LED* spectral distribution will peak at, i.e.
the peak of each generated single *LED* spectral distribution.
fwhm
Full widths at half maximum, i.e. widths of the underlying gaussian
spectral distributions measured between those points on the *y* axis
which are half the maximum amplitude.
peak_power_ratios
Peak power ratio of each generated single *LED* spectral
distribution.
shape
Spectral shape used to create the spectral distribution.
method
Computation method.
Other Parameters
----------------
kwargs
{:func:`colour.colorimetry.sd_multi_leds_Ohno2005`},
See the documentation of the previously listed definition.
Returns
-------
:class:`colour.SpectralDistribution`
Multi *LED* spectral distribution.
Notes
-----
- By default, the spectral distribution will use the shape given by
:attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.
References
----------
:cite:`Ohno2005`, :cite:`Ohno2008a`
Examples
--------
>>> sd = sd_multi_leds(
... np.array([457, 530, 615]),
... np.array([20, 30, 20]),
... np.array([0.731, 1.000, 1.660]),
... )
>>> sd.shape
SpectralShape(360.0, 780.0, 1.0)
>>> sd[500] # doctest: +ELLIPSIS
0.1295132...
"""
method = validate_method(method, SD_MULTI_LEDS_METHODS)
return SD_MULTI_LEDS_METHODS[method](
peak_wavelengths, fwhm, peak_power_ratios, shape, **kwargs
)
|
colour-science/colour
|
colour/colorimetry/generation.py
|
Python
|
bsd-3-clause
| 23,477
|
[
"Gaussian"
] |
f816bf4848374724a909e59068f2e9c8050a8883d748b62c43fdfe682aac618b
|
import unittest
import collections
from ..visitor import ClassVisitor, handle
class TestClassVisitor(unittest.TestCase):
def assert_visitation(self, expected_key, value):
visited_type, visited_value, args, kwargs = ExampleClassVisitor().visit(value, 2, a=3)
self.assertEqual(expected_key, visited_type)
self.assertEqual(value, visited_value)
self.assertEqual((2,), args)
self.assertEqual({'a': 3}, kwargs)
def test_visit_int(self):
self.assert_visitation(int, 1)
def test_visit_bool(self):
self.assert_visitation(bool, True)
def test_visit_subtype(self):
self.assert_visitation(collections.Sequence, ())
def test_visit_special_subtype(self):
self.assert_visitation(str, 'hello')
def test_visit_default(self):
self.assert_visitation('default', object())
def test_visit_default_standard_operation(self):
self.assertRaises(TypeError, ClassVisitor().visit, 1)
class ExampleClassVisitor(ClassVisitor):
@handle(int)
def visit_int(self, value, *args, **kwargs):
return (int, value, args, kwargs)
@handle(bool)
def visit_bool(self, value, *args, **kwargs):
return (bool, value, args, kwargs)
@handle(collections.Sequence)
def visit_seq(self, value, *args, **kwargs):
return (collections.Sequence, value, args, kwargs)
@handle(str)
def visit_str(self, value, *args, **kwargs):
return (str, value, args, kwargs)
def default(self, value, *args, **kwargs):
return ('default', value, args, kwargs)
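# Editorial sketch: the tests above pin down the ClassVisitor contract
# (most-specific handler wins, ``default`` is the fallback, the bare base
# class raises TypeError). One plausible implementation -- an assumption for
# illustration, not pydepend's actual code -- dispatches over the value's MRO
# and falls back to isinstance() checks for ABCs such as collections.Sequence:
def sketch_handle(*classes):
    def decorator(method):
        method._handles = classes
        return method
    return decorator

class SketchClassVisitor(object):
    def visit(self, value, *args, **kwargs):
        handlers = {}
        for name in dir(type(self)):
            method = getattr(self, name)
            for handled in getattr(method, '_handles', ()):
                handlers[handled] = method
        # Exact/most-derived match first, walking the MRO (bool before int).
        for klass in type(value).__mro__:
            if klass in handlers:
                return handlers[klass](value, *args, **kwargs)
        # ABCs (e.g. Sequence) do not appear in a concrete type's MRO.
        for handled, method in handlers.items():
            if isinstance(value, handled):
                return method(value, *args, **kwargs)
        return self.default(value, *args, **kwargs)

    def default(self, value, *args, **kwargs):
        raise TypeError('no handler for %r' % (value,))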
|
herczy/pydepend
|
pydepend/tests/test_visitor.py
|
Python
|
bsd-3-clause
| 1,593
|
[
"VisIt"
] |
88cd72e3b856efe17b7a040784e9e842ef6741401ede3486cb05e72e5145c2d2
|
class protein ():
pass
def fastaParser(pFile, seq_format):
if seq_format != "fasta":
print "Cannot read non-fasta protein records without biopython."
exit()
yield -1
newProt = protein()
sequence = []
for line in pFile.readlines():
if line[0] == ";":
continue
if line[0] == ">":
if hasattr(newProt, "name"):
newProt.seq = "".join(sequence)
yield newProt
newProt = protein()
sequence = []
newProt.name = line.split(" ")[0][1:]
else:
sequence.append(line.strip("\n "))
newProt.seq = "".join(sequence)
yield newProt
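# Editorial sketch: minimal usage of fastaParser on an in-memory handle
# (illustration only; StringIO is the Python 2 spelling, matching the
# print-statement syntax used in this module).
if __name__ == "__main__":
    from StringIO import StringIO
    demo = StringIO(">sp1 first\nMKV\nLLT\n>sp2 second\nGGA\n")
    for prot in fastaParser(demo, "fasta"):
        # prot.name is "sp1" then "sp2"; prot.seq is "MKVLLT" then "GGA"
        print prot.name, prot.seq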
|
DeclanCrew/LC_MS_MS_Search
|
fastaParse.py
|
Python
|
gpl-3.0
| 729
|
[
"Biopython"
] |
9fd7b983ed55f9c26a97f3c8adc31cc5c7b51808b80f1cac3291d90c44c413d0
|
# Brian Keegan, 2012
# This function creates text files containing a list of Wikipedia categories which will be passed to a scraping algorithm.
# Two files are created for each category type, events before 2001 (Wikipedia's founding) and after 2001
#Define schema for naming category types
topics=[('fires', "Category:{0}_fires"),
('health', "Category:{0}_health_disasters"),
('industrial', "Category:{0}_industrial_disasters"),
('natural', "Category:{0}_natural_disasters"),
('transport', "Category:Transport_disasters_in_{0}"),
('terrorist', "Category:Terrorist_incidents_in_{0}"),
('conflicts', "Category:Conflicts_in_{0}"),
('crimes', "Category:{0}_crimes")]
start_year = 1990
end_year = 2000
files_pre2k = dict([(topic, open('{0}_pre2k.txt'.format(topic), 'a')) for topic,c in topics])
if end_year > 2000:
files_post2k = dict([(topic, open('{0}_post2k.txt'.format(topic), 'a')) for topic,c in topics])
for topic, cat_string in topics:
for year in range(start_year, end_year+1):
category = cat_string.format(year)
if year <= 2000:
files_pre2k[topic].write(category.encode('UTF-16') + '\r\n')
else:
files_post2k[topic].write(category.encode('UTF-16') + '\r\n')
for topic,c in topics:
files_pre2k[topic].close()
    if end_year > 2000:
files_post2k[topic].close()
|
tothebeat/wikipedia-revisions
|
old/list_generator_2.py
|
Python
|
mit
| 1,418
|
[
"Brian"
] |
8a3a0977a4f1a46caf15273c27e7ab26aecc1d82dcdcc29dc4f5148bad076525
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RVariantannotation(RPackage):
"""Annotate variants, compute amino acid coding changes, predict coding
outcomes."""
homepage = "https://www.bioconductor.org/packages/VariantAnnotation/"
url = "https://git.bioconductor.org/packages/VariantAnnotation"
list_url = homepage
version('1.22.3', git='https://git.bioconductor.org/packages/VariantAnnotation', commit='3a91b6d4297aa416d5f056dec6f8925eb1a8eaee')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-bsgenome', type=('build', 'run'))
depends_on('r-rtracklayer', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.22.3')
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-variantannotation/package.py
|
Python
|
lgpl-2.1
| 2,615
|
[
"Bioconductor"
] |
f25c5036c091b629255c540579047116cd97e22773d860c7e585753977b0795b
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.cache.base import BaseCacheModule
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
self._cache = {}
def get(self, key):
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
def keys(self):
return self._cache.keys()
def contains(self, key):
return key in self._cache
def delete(self, key):
del self._cache[key]
def flush(self):
self._cache = {}
def copy(self):
return self._cache.copy()
def __getstate__(self):
return self.copy()
def __setstate__(self, data):
self._cache = data
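# Editorial sketch: the plugin is a plain dict wrapper, so its behaviour can
# be exercised directly (illustration only, outside Ansible's plugin loader):
if __name__ == '__main__':
    cache = CacheModule()
    cache.set('host1', {'ansible_os_family': 'Debian'})
    assert cache.contains('host1')
    assert cache.get('host1') == {'ansible_os_family': 'Debian'}
    cache.flush()
    assert not list(cache.keys())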
|
goozbach/ansible
|
lib/ansible/plugins/cache/memory.py
|
Python
|
gpl-3.0
| 1,466
|
[
"Brian"
] |
2df9c02ba0b9a6dd0af267bfbae0c9bbe5e0cd56b231aa5a161cb3db498cd23e
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestGaussian(unittest.TestCase):
def setUp(self):
self.m = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
self.v = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def check_backward(self, m_data, v_data, y_grad):
m = chainer.Variable(m_data)
v = chainer.Variable(v_data)
y = functions.gaussian(m, v)
self.assertEqual(y.data.dtype, numpy.float32)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((m.data, v.data))
gm, gv = gradient_check.numerical_grad(f, (m.data, v.data), (y.grad,))
gradient_check.assert_allclose(gm, m.grad, atol=1e-4, rtol=1e-3)
gradient_check.assert_allclose(gv, v.grad, atol=1e-4, rtol=1e-3)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.m, self.v, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.m),
cuda.to_gpu(self.v),
cuda.to_gpu(self.gy))
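# Editorial sketch: ``functions.gaussian`` samples with the reparameterization
# trick, y = m + exp(v / 2) * eps where eps ~ N(0, 1) and ``v`` is the log
# variance, so dy/dm = 1 and dy/dv = 0.5 * exp(v / 2) * eps; the numerical
# gradient check above validates exactly those derivatives. A plain-numpy
# restatement of the forward pass (illustration only):
def _reparameterized_sample(m, v, eps):
    return m + numpy.exp(v / 2.0) * eps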
testing.run_module(__name__, __file__)
|
truongdq/chainer
|
tests/chainer_tests/functions_tests/noise_tests/test_gaussian.py
|
Python
|
mit
| 1,464
|
[
"Gaussian"
] |
f5a894581bec611416a5538d76a52f6119f6b2f7886d567e5783b9c9bbe3720f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import json
from monty.json import MontyDecoder
import numpy as np
import matplotlib
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.xas.spectrum import XANES
from pymatgen.vis.plotters import SpectrumPlotter
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files/spectrum_test")
with open(os.path.join(test_dir, 'Pd2O.json')) as fp:
spect_data_dict = json.load(fp, cls=MontyDecoder)
class SpectrumPlotterTest(PymatgenTest):
def setUp(self):
self.xanes = XANES.from_dict(spect_data_dict)
def test_get_plot(self):
self.plotter = SpectrumPlotter(yshift=0.2)
self.plotter.add_spectrum("Pd2O", self.xanes)
xanes = self.xanes.copy()
xanes.y += np.random.randn(len(xanes.y)) * 0.005
self.plotter.add_spectrum("Pd2O + noise", xanes)
self.plotter.add_spectrum("Pd2O - replot", xanes, "k")
plt = self.plotter.get_plot()
self.plotter.save_plot("spectrum_plotter_test.eps")
os.remove("spectrum_plotter_test.eps")
def test_get_stacked_plot(self):
self.plotter = SpectrumPlotter(yshift=0.2, stack=True)
self.plotter.add_spectrum("Pd2O", self.xanes, "b")
xanes = self.xanes.copy()
xanes.y += np.random.randn(len(xanes.y)) * 0.005
self.plotter.add_spectrum("Pd2O + noise", xanes, "r")
plt = self.plotter.get_plot()
if __name__ == '__main__':
unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/vis/tests/test_plotters.py
|
Python
|
mit
| 1,607
|
[
"pymatgen"
] |
b34484a9e5fb68cc1170c4a7cbb98c1b82ef357be75bd302d1e2e25a5fa7b77e
|
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
from pandas.compat import DeepChainMap, string_types, u
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas.core.base import StringMixin
import pandas.core.common as com
from pandas.core.computation import expr, ops
from pandas.core.computation.common import _ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super(Scope, self).__init__(level + 1, global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryable
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {name!r} is not defined'
.format(name=self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), 'kind', None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), 'meta', None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), 'metadata', None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = pd.Timedelta(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, string_types):
# string quoting
return TermValue(v, stringify(v), u('string'))
else:
raise TypeError("Cannot compare {v} of type {typ} to {kind} column"
.format(v=v, typ=type(v), kind=kind))
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Filter : [{lhs}] -> [{op}]"
.format(lhs=self.filter[0], op=self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind) for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
else:
raise TypeError("passing a filterable condition to a non-table "
"indexer [{slf}]".format(slf=self))
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
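# Editorial note: the callable returned above is applied to an axis after the
# table has been read, e.g. (illustration only):
#
#     f = lambda axis, vals: axis.isin(vals)          # op '=='
#     f(pd.Index(['a', 'b', 'c']), ['a', 'c'])        # [True, False, True]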
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Condition : [{cond}]]"
.format(cond=self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "({cond})".format(cond=' | '.join(vs))
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition,
op=self.op,
rhs=self.rhs.condition)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(self, 'visit_{node}'.format(node=bin_node),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
# only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {value!r} with "
"{slobj!r}".format(value=value, slobj=slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {name}"
.format(name=ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, string_types)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, queryables=None, encoding=None, scope_level=0):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def __unicode__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid condition".format(expr=self.expr,
slf=self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid filter".format(expr=self.expr,
slf=self))
return self.condition, self.filter
class TermValue(object):
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == u'string':
if encoding is not None:
return self.converted
return '"{converted}"'.format(converted=self.converted)
elif self.kind == u'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, string_types):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
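# Editorial sketch: these Expr objects back the ``where`` argument of HDFStore
# queries. A minimal end-to-end usage (illustration only; requires PyTables):
if __name__ == '__main__':
    df = pd.DataFrame({'A': range(5)},
                      index=pd.date_range('2018-01-01', periods=5))
    with pd.HDFStore('demo.h5') as store:
        store.append('df', df, data_columns=['A'])
        subset = store.select('df', where='A > 2')  # parsed via Expr above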
|
GuessWhoSamFoo/pandas
|
pandas/core/computation/pytables.py
|
Python
|
bsd-3-clause
| 19,388
|
[
"VisIt"
] |
376cd7c069e72d730dbd87e0169836e8714c6ef74f5c998d5c56b9445497498f
|
#!/usr/bin/env python
import shutil
import tempfile
import configparser
from textwrap import dedent
import tarfile
import pyaml
import hashlib
import os
import re
import bs4
import urllib
from urllib import request
from urllib import parse
from urllib import error
from collections import OrderedDict
import logging
import requests
logging.basicConfig(level=logging.INFO, format='[bioconductor_skeleton.py %(asctime)s]: %(message)s')
logger = logging.getLogger()
logging.getLogger("requests").setLevel(logging.WARNING)
base_url = 'http://bioconductor.org/packages/'
# Packages that might be specified in the DESCRIPTION of a package as
# dependencies, but since they're built-in we don't need to specify them in
# the meta.yaml.
#
# Note: this list is from:
#
# conda create -n rtest -c r r
# R -e "rownames(installed.packages())"
BASE_R_PACKAGES = ["base", "boot", "class", "cluster", "codetools", "compiler",
"datasets", "foreign", "graphics", "grDevices", "grid",
"KernSmooth", "lattice", "MASS", "Matrix", "methods",
"mgcv", "nlme", "nnet", "parallel", "rpart", "spatial",
"splines", "stats", "stats4", "survival", "tcltk", "tools",
"utils"]
# A list of packages, in recipe name format
GCC_PACKAGES = ['r-rcpp']
HERE = os.path.abspath(os.path.dirname(__file__))
class PageNotFoundError(Exception): pass
class BioCProjectPage(object):
def __init__(self, package):
"""
Represents a single Bioconductor package page and provides access to
scraped data.
>>> x = BioCProjectPage('DESeq2')
>>> x.tarball_url
'http://bioconductor.org/packages/release/bioc/src/contrib/DESeq2_1.8.2.tar.gz'
"""
self.base_url = base_url
self.package = package
self._md5 = None
self._cached_tarball = None
self._dependencies = None
self.build_number = 0
self.request = requests.get(os.path.join(base_url, package))
if not self.request:
raise PageNotFoundError('Error {0.status_code} ({0.reason})'.format(self.request))
# Since we provide the "short link" we will get redirected. Using
# requests allows us to keep track of the final destination URL, which
# we need for reconstructing the tarball URL.
self.url = self.request.url
# The table at the bottom of the page has the info we want. An earlier
# draft of this script parsed the dependencies from the details table.
# That's still an option if we need a double-check on the DESCRIPTION
# fields.
self.soup = bs4.BeautifulSoup(
self.request.content,
'html.parser')
self.details_table = self.soup.find_all(attrs={'class': 'details'})[0]
# However, it is helpful to get the version info from this table. That
# way we can try getting the bioaRchive tarball and cache that.
for td in self.details_table.findAll('td'):
if td.getText() == 'Version':
version = td.findNext().getText()
break
self.version = version
self.depends_on_gcc = False
@property
def bioaRchive_url(self):
"""
Returns the bioaRchive URL if one exists for this version of this
package, otherwise returns None.
Note that to get the package version, we're still getting the
bioconductor tarball to extract the DESCRIPTION file.
"""
url = 'https://bioarchive.galaxyproject.org/{0.package}_{0.version}.tar.gz'.format(self)
response = requests.get(url)
if response:
return url
elif response.status_code == 404:
return
else:
raise PageNotFoundError("Unexpected error: {0.status_code} ({0.reason})".format(response))
@property
def bioconductor_tarball_url(self):
"""
Return the url to the tarball from the bioconductor site.
"""
r = re.compile(r'{0}.*\.tar\.gz'.format(self.package))
def f(href):
return href and r.search(href)
results = self.soup.find_all(href=f)
assert len(results) == 1, (
"Found {0} tags with '.tar.gz' in href".format(len(results)))
s = list(results[0].stripped_strings)
assert len(s) == 1
# build the actual URL based on the identified package name and the
# relative URL from the source. Here we're just hard-coding
# '../src/contrib' based on the structure of the bioconductor site.
return os.path.join(parse.urljoin(self.url, '../src/contrib'), s[0])
@property
def tarball_url(self):
url = self.bioaRchive_url
if url:
return url
return self.bioconductor_tarball_url
@property
def tarball_basename(self):
return os.path.basename(self.tarball_url)
@property
def cached_tarball(self):
"""
Downloads the tarball to the `cached_bioconductor_tarballs` dir if one
hasn't already been downloaded for this package.
This is because we need the whole tarball to get the DESCRIPTION file
and to generate an md5 hash, so we might as well save it somewhere.
"""
if self._cached_tarball:
return self._cached_tarball
cache_dir = os.path.join(HERE, 'cached_bioconductor_tarballs')
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
fn = os.path.join(cache_dir, self.tarball_basename)
if os.path.exists(fn):
self._cached_tarball = fn
return fn
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, 'wb') as fout:
logger.info('Downloading {0} to {1}'.format(self.tarball_url, fn))
response = requests.get(self.tarball_url)
if response:
fout.write(response.content)
else:
raise PageNotFoundError('Unexpected error {0.status_code} ({0.reason})'.format(response))
shutil.move(tmp, fn)
self._cached_tarball = fn
return fn
@property
def description(self):
"""
Extract the DESCRIPTION file from the tarball and parse it.
"""
t = tarfile.open(self.cached_tarball)
d = t.extractfile(os.path.join(self.package, 'DESCRIPTION')).read()
self._contents = d
c = configparser.ConfigParser(strict=False)
# On-spec config files need a "section", but the DESCRIPTION file
# doesn't have one. So we just add a fake section, and let the
# configparser take care of the details of parsing.
c.read_string('[top]\n' + d.decode('UTF-8'))
e = c['top']
# Glue together newlines
for k in e.keys():
e[k] = e[k].replace('\n', ' ')
return dict(e)
#@property
#def version(self):
# return self.description['version']
@property
def license(self):
return self.description['license']
@property
def imports(self):
try:
return self.description['imports'].split(', ')
except KeyError:
return []
@property
def depends(self):
try:
return self.description['depends'].split(', ')
except KeyError:
return []
def _parse_dependencies(self, items):
"""
The goal is to go from
['package1', 'package2', 'package3 (>= 0.1)', 'package4']
to::
[
('package1', ""),
('package2', ""),
('package3', " >=0.1"),
('package1', ""),
]
"""
results = []
for item in items:
toks = [i.strip() for i in item.split('(')]
if len(toks) == 1:
results.append((toks[0], ""))
elif len(toks) == 2:
assert ')' in toks[1]
toks[1] = toks[1].replace(')', '').replace(' ', '')
results.append(tuple(toks))
else:
raise ValueError("Found {0} toks: {1}".format(len(toks), toks))
return results
@property
def dependencies(self):
if self._dependencies:
return self._dependencies
results = []
# Some packages specify a minimum R version, which we'll need to keep
# track of
specific_r_version = False
# Sometimes a version is specified only in the `depends` and not in the
# `imports`. We keep the most specific version of each.
version_specs = list(
set(
self._parse_dependencies(self.imports) +
self._parse_dependencies(self.depends)
)
)
versions = {}
for name, version in version_specs:
if name in versions:
if not versions[name] and version:
versions[name] = version
else:
versions[name] = version
for name, version in sorted(versions.items()):
# DESCRIPTION notes base R packages, but we don't need to specify
# them in the dependencies.
if name in BASE_R_PACKAGES:
continue
# Try finding the dependency on the bioconductor site; if it can't
# be found then we assume it's in CRAN.
try:
BioCProjectPage(name)
prefix = 'bioconductor-'
except PageNotFoundError:
prefix = 'r-'
logger.info('{0:>12} dependency: name="{1}" version="{2}"'.format(
{'r-': 'R', 'bioconductor-': 'BioConductor'}[prefix],
name, version))
# add padding to version string
if version:
version = " " + version
if name.lower() == 'r':
# Had some issues with CONDA_R finding the right version if "r"
# had version restrictions. Since we're generally building
# up-to-date packages, we can just use "r".
# # "r >=2.5" rather than "r-r >=2.5"
# specific_r_version = True
# results.append(name.lower() + version)
# results.append('r')
pass
else:
results.append(prefix + name.lower() + version)
if prefix + name.lower() in GCC_PACKAGES:
self.depends_on_gcc = True
# Add R itself
results.append('r-base')
self._dependencies = results
return self._dependencies
@property
def md5(self):
"""
Calculate the md5 hash of the tarball so it can be filled into the
meta.yaml.
"""
if self._md5 is None:
self._md5 = hashlib.md5(
open(self.cached_tarball, 'rb').read()).hexdigest()
return self._md5
@property
def meta_yaml(self):
"""
Build the meta.yaml string based on discovered values.
Here we use a nested OrderedDict so that all meta.yaml files created by
this script have the same consistent format. Otherwise we're at the
mercy of Python dict sorting.
We use pyaml (rather than yaml) because it has better handling of
OrderedDicts.
However pyaml does not support comments, but if there are gcc and llvm
dependencies then they need to be added with preprocessing selectors
for `# [linux]` and `# [osx]`.
We do this with a unique placeholder (not a jinja or $-based
string.Template so as to avoid conflicting with the conda jinja
templating or the `$R` in the test commands, and replace the text once
the yaml is written.
"""
url = self.bioaRchive_url
if not url:
url = self.tarball_url
DEPENDENCIES = sorted(self.dependencies)
d = OrderedDict((
(
'package', OrderedDict((
('name', 'bioconductor-' + self.package.lower()),
('version', self.version),
)),
),
(
'source', OrderedDict((
('fn', self.tarball_basename),
('url', url),
('md5', self.md5),
)),
),
(
'build', OrderedDict((
('number', self.build_number),
('rpaths', ['lib/R/lib/', 'lib/']),
)),
),
(
'requirements', OrderedDict((
# If you don't make copies, pyaml sees these as the same
# object and tries to make a shortcut, causing an error in
# decoding unicode. Possible pyaml bug? Anyway, this fixes
# it.
('build', DEPENDENCIES[:]),
('run', DEPENDENCIES[:]),
)),
),
(
'test', OrderedDict((
('commands',
['''$R -e "library('{package}')"'''.format(
package=self.package)]),
)),
),
(
'about', OrderedDict((
('home', self.url),
('license', self.license),
('summary', self.description['description']),
)),
),
))
if self.depends_on_gcc:
d['requirements']['build'].append('GCC_PLACEHOLDER')
d['requirements']['build'].append('LLVM_PLACEHOLDER')
rendered = pyaml.dumps(d).decode('utf-8')
rendered = rendered.replace('GCC_PLACEHOLDER', 'gcc # [linux]')
rendered = rendered.replace('LLVM_PLACEHOLDER', 'llvm # [osx]')
return rendered
def write_recipe(package, recipe_dir, force=False):
"""
Write the meta.yaml and build.sh files.
"""
proj = BioCProjectPage(package)
recipe_dir = os.path.join(recipe_dir, 'bioconductor-' + proj.package.lower())
if os.path.exists(recipe_dir) and not force:
raise ValueError("{0} already exists, aborting".format(recipe_dir))
else:
if not os.path.exists(recipe_dir):
print('creating %s' % recipe_dir)
os.makedirs(recipe_dir)
# If the version number has not changed but something else in the recipe
# *has* changed, then bump the version number.
meta_file = os.path.join(recipe_dir, 'meta.yaml')
if os.path.exists(meta_file):
updated_meta = pyaml.yaml.load(proj.meta_yaml)
current_meta = pyaml.yaml.load(open(meta_file))
# pop off the version and build numbers so we can compare the rest of
# the dicts
updated_version = updated_meta['package'].pop('version')
current_version = current_meta['package'].pop('version')
updated_build_number = updated_meta['build'].pop('number')
current_build_number = current_meta['build'].pop('number')
if (
(updated_version == current_version)
and
(updated_meta != current_meta)
):
proj.build_number = int(current_build_number) + 1
with open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fout:
fout.write(proj.meta_yaml)
with open(os.path.join(recipe_dir, 'build.sh'), 'w') as fout:
fout.write(dedent(
"""
#!/bin/bash
# R refuses to build packages that mark themselves as
# "Priority: Recommended"
mv DESCRIPTION DESCRIPTION.old
grep -v '^Priority: ' DESCRIPTION.old > DESCRIPTION
#
$R CMD INSTALL --build .
#
# # Add more build steps here, if they are necessary.
#
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build
# process.
# """
)
)
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('package', help='Bioconductor package name')
ap.add_argument('--recipes', default='recipes',
help='Recipe will be created in <recipe-dir>/<package>')
ap.add_argument('--force', action='store_true',
help='Overwrite the contents of an existing recipe')
args = ap.parse_args()
write_recipe(args.package, args.recipes, args.force)
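# Editorial note: typical invocation from the shell (illustration only; the
# package name and recipe directory are examples):
#
#     python bioconductor_skeleton.py DESeq2 --recipes recipes
#
# This scrapes the Bioconductor page for DESeq2, downloads and caches the
# tarball, and writes recipes/bioconductor-deseq2/{meta.yaml,build.sh}.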
|
guowei-he/bioconda-recipes
|
scripts/bioconductor/bioconductor_skeleton.py
|
Python
|
mit
| 16,642
|
[
"Bioconductor"
] |
641efeaf9c8407c4733e98e6b64a212e6637f50f666285e4b3323c7b0f7744e5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Works for Abinit
"""
from __future__ import unicode_literals, division, print_function
import os
import shutil
import time
import abc
import collections
import numpy as np
import six
import copy
from six.moves import filter
from monty.collections import AttrDict
from monty.itertools import chunks
from monty.functools import lazy_property
from monty.fnmatch import WildCard
from monty.dev import deprecated
from pydispatch import dispatcher
from pymatgen.core.units import EnergyArray
from . import wrappers
from .nodes import Dependency, Node, NodeError, NodeResults, check_spectator
from .tasks import (Task, AbinitTask, ScfTask, NscfTask, DfptTask, PhononTask, DdkTask,
BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask,
DteTask, EphTask, CollinearThenNonCollinearScfTask)
from .utils import Directory
from .netcdf import ETSF_Reader, NetcdfReader
from .abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Work",
"BandStructureWork",
"RelaxWork",
"G0W0Work",
"QptdmWork",
"SigmaConvWork",
"BseMdfWork",
"PhononWork",
]
class WorkResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
@classmethod
def from_node(cls, work):
"""Initialize an instance from a :class:`Work` instance."""
new = super(WorkResults, cls).from_node(work)
# Will put all files found in outdir in GridFs
# Warning: assuming binary files.
d = {os.path.basename(f): f for f in work.outdir.list_filepaths()}
new.register_gridfs_files(**d)
return new
class WorkError(NodeError):
"""Base class for the exceptions raised by Work objects."""
class BaseWork(six.with_metaclass(abc.ABCMeta, Node)):
Error = WorkError
Results = WorkResults
# interface modeled after subprocess.Popen
@property
@abc.abstractmethod
def processes(self):
"""Return a list of objects that support the `subprocess.Popen` protocol."""
def poll(self):
"""
Check if all child processes have terminated. Set and return returncode attribute.
"""
return [task.poll() for task in self]
def wait(self):
"""
Wait for child processes to terminate. Set and return returncode attribute.
"""
return [task.wait() for task in self]
def communicate(self, input=None):
"""
Interact with processes: Send data to stdin. Read data from stdout and
stderr, until end-of-file is reached.
Wait for the processes to terminate. The optional input argument should be a
string to be sent to the child processes, or None, if no data should be
sent to the children.
communicate() returns a list of tuples (stdoutdata, stderrdata).
"""
return [task.communicate(input) for task in self]
@property
def returncodes(self):
"""
The children return codes, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
return [task.returncode for task in self]
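# Usage sketch (an assumption about client code, not part of this module):
# because the interface above mirrors subprocess.Popen, a Work whose tasks
# have been started can be driven like a batch of child processes.
# `work` below is a hypothetical Work instance.
#
#   work.wait()  # block until every child task terminates
#   if any(rc not in (0, None) for rc in work.returncodes):
#       print("some tasks failed:", work.returncodes)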
@property
def ncores_reserved(self):
"""
Returns the number of cores reserved at this moment.
A core is reserved if it's still not running but
we have submitted the task to the queue manager.
"""
return sum(task.manager.num_cores for task in self if task.status == task.S_SUB)
@property
def ncores_allocated(self):
"""
Returns the number of CPUs allocated at this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN])
@property
def ncores_used(self):
"""
Returns the number of cores used at this moment.
A core is used if there's a job that is running on it.
"""
return sum(task.manager.num_cores for task in self if task.status == task.S_RUN)
def fetch_task_to_run(self):
"""
Returns the first task that is ready to run or
None if no task can be submitted at present"
Raises:
`StopIteration` if all tasks are done.
"""
# All the tasks are done so raise an exception
# that will be handled by the client code.
if all(task.is_completed for task in self):
raise StopIteration("All tasks completed.")
for task in self:
if task.can_run:
return task
# No task found, this usually happens when we have dependencies.
# Beware of possible deadlocks here!
logger.warning("Possible deadlock in fetch_task_to_run!")
return None
def fetch_alltasks_to_run(self):
"""
Returns a list with all the tasks that can be submitted.
Empty list if no task has been found.
"""
return [task for task in self if task.can_run]
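# Scheduler sketch (an assumption about how a flow runner consumes this API,
# not code from this module): poll fetch_task_to_run in a loop, submitting
# tasks until StopIteration signals that everything is done.
#
#   while True:
#       try:
#           task = work.fetch_task_to_run()
#       except StopIteration:
#           break  # all tasks completed
#       if task is None:
#           time.sleep(5)  # dependencies still pending
#           continue
#       task.start()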
@abc.abstractmethod
def setup(self, *args, **kwargs):
"""Method called before submitting the calculations."""
def _setup(self, *args, **kwargs):
self.setup(*args, **kwargs)
def connect_signals(self):
"""
Connect the signals within the work.
The :class:`Work` is responsible for catching the important signals raised from
its tasks and raising new signals when some particular condition occurs.
"""
for task in self:
dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)
def disconnect_signals(self):
"""
Disable the signals within the work. This function reverses the process of `connect_signals`
"""
for task in self:
try:
dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task)
except dispatcher.errors.DispatcherKeyError as exc:
logger.debug(str(exc))
@property
def all_ok(self):
return all(task.status == task.S_OK for task in self)
#@check_spectator
def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It executes on_all_ok when all tasks in self have reached `S_OK`.
"""
logger.debug("in on_ok with sender %s" % sender)
if self.all_ok:
if self.finalized:
return AttrDict(returncode=0, message="Work has been already finalized")
else:
# Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work)
self.finalized = True
try:
results = AttrDict(**self.on_all_ok())
except Exception as exc:
self.history.critical("on_all_ok raises %s" % str(exc))
self.finalized = False
raise
# Signal to possible observers that the `Work` reached S_OK
self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self))
if self._finalized:
self.send_signal(self.S_OK)
return results
return AttrDict(returncode=1, message="Not all tasks are OK!")
#@check_spectator
def on_all_ok(self):
"""
This method is called once the `Work` is completed i.e. when all the tasks
have reached status S_OK. Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_all_ok of the base class!")
def get_results(self, **kwargs):
"""
Method called once the calculations are completed.
The base version returns a dictionary task_name: TaskResults for each task in self.
"""
results = self.Results.from_node(self)
return results
class NodeContainer(six.with_metaclass(abc.ABCMeta)):
"""
Mixin classes for `Work` and `Flow` objects providing helper functions
to register tasks in the container. The helper functions call the
`register` method of the container.
"""
# TODO: Abstract protocol for containers
@abc.abstractmethod
def register_task(self, *args, **kwargs):
"""
Register a task in the container.
"""
# TODO: shall flow.register_task return a Task or a Work?
# Helper functions
def register_scf_task(self, *args, **kwargs):
"""Register a Scf task."""
kwargs["task_class"] = ScfTask
return self.register_task(*args, **kwargs)
def register_collinear_then_noncollinear_scf_task(self, *args, **kwargs):
"""Register a Scf task that perform a SCF run first with nsppol = 2 and then nspinor = 2"""
kwargs["task_class"] = CollinearThenNonCollinearScfTask
return self.register_task(*args, **kwargs)
def register_nscf_task(self, *args, **kwargs):
"""Register a nscf task."""
kwargs["task_class"] = NscfTask
return self.register_task(*args, **kwargs)
def register_relax_task(self, *args, **kwargs):
"""Register a task for structural optimization."""
kwargs["task_class"] = RelaxTask
return self.register_task(*args, **kwargs)
def register_phonon_task(self, *args, **kwargs):
"""Register a phonon task."""
kwargs["task_class"] = PhononTask
return self.register_task(*args, **kwargs)
def register_ddk_task(self, *args, **kwargs):
"""Register a ddk task."""
kwargs["task_class"] = DdkTask
return self.register_task(*args, **kwargs)
def register_scr_task(self, *args, **kwargs):
"""Register a screening task."""
kwargs["task_class"] = ScrTask
return self.register_task(*args, **kwargs)
def register_sigma_task(self, *args, **kwargs):
"""Register a sigma task."""
kwargs["task_class"] = SigmaTask
return self.register_task(*args, **kwargs)
# TODO: Remove
def register_dde_task(self, *args, **kwargs):
"""Register a Dde task."""
kwargs["task_class"] = DdeTask
return self.register_task(*args, **kwargs)
def register_dte_task(self, *args, **kwargs):
"""Register a Dte task."""
kwargs["task_class"] = DteTask
return self.register_task(*args, **kwargs)
def register_bec_task(self, *args, **kwargs):
"""Register a BEC task."""
kwargs["task_class"] = BecTask
return self.register_task(*args, **kwargs)
def register_bse_task(self, *args, **kwargs):
"""Register a Bethe-Salpeter task."""
kwargs["task_class"] = BseTask
return self.register_task(*args, **kwargs)
def register_eph_task(self, *args, **kwargs):
"""Register an electron-phonon task."""
kwargs["task_class"] = EphTask
return self.register_task(*args, **kwargs)
def walknset_vars(self, task_class=None, *args, **kwargs):
"""
Set the values of the ABINIT variables in the input files of the nodes
Args:
task_class: If not None, only the input files of the tasks belonging
to class `task_class` are modified.
Example:
flow.walknset_vars(ecut=10, kptopt=4)
"""
def change_task(task):
if task_class is not None and task.__class__ is not task_class: return False
return True
if self.is_work:
for task in self:
if not change_task(task): continue
task.set_vars(*args, **kwargs)
elif self.is_flow:
for task in self.iflat_tasks():
if not change_task(task): continue
task.set_vars(*args, **kwargs)
else:
raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__)
class Work(BaseWork, NodeContainer):
"""
A Work is a list of (possibly connected) tasks.
"""
def __init__(self, workdir=None, manager=None):
"""
Args:
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
super(Work, self).__init__()
self._tasks = []
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
def set_manager(self, manager):
"""Set the :class:`TaskManager` to use to launch the :class:`Task`."""
self.manager = manager.deepcopy()
for task in self:
task.set_manager(manager)
@property
def flow(self):
"""The flow containing this :class:`Work`."""
return self._flow
def set_flow(self, flow):
"""Set the flow associated to this :class:`Work`."""
if not hasattr(self, "_flow"):
self._flow = flow
else:
if self._flow != flow:
raise ValueError("self._flow != flow")
@lazy_property
def pos(self):
"""The position of self in the :class:`Flow`"""
for i, work in enumerate(self.flow):
if self == work:
return i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Directories with (input|output|temporary) data.
# The work will use these directories to connect
# itself to other works and/or to produce new data
# that will be used by its children.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
def chroot(self, new_workdir):
self.set_workdir(new_workdir, chroot=True)
for i, task in enumerate(self):
new_tdir = os.path.join(self.workdir, "t" + str(i))
task.set_workdir(new_tdir, chroot=True)
def __len__(self):
return len(self._tasks)
def __iter__(self):
return self._tasks.__iter__()
def __getitem__(self, slice):
return self._tasks[slice]
def chunks(self, chunk_size):
"""Yield successive chunks of tasks of lenght chunk_size."""
for tasks in chunks(self, chunk_size):
yield tasks
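# Example (illustrative): submit the tasks in batches of two instead of all
# at once; `work` is a hypothetical Work instance.
#
#   for batch in work.chunks(2):
#       for task in batch:
#           task.start()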
def ipath_from_ext(self, ext):
"""
Returns the path of the input file with extension ext.
Use it when the file does not exist yet.
"""
return self.indir.path_in("in_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return self.outdir.path_in("out_" + ext)
@property
def processes(self):
return [task.process for task in self]
@property
def all_done(self):
"""True if all the :class:`Task` objects in the :class:`Work` are done."""
return all(task.status >= task.S_DONE for task in self)
@property
def isnc(self):
"""True if norm-conserving calculation."""
return all(task.isnc for task in self)
@property
def ispaw(self):
"""True if PAW calculation."""
return all(task.ispaw for task in self)
@property
def status_counter(self):
"""
Returns a `Counter` object that counts the number of tasks with
given status (use the string representation of the status as key).
"""
counter = collections.Counter()
for task in self:
counter[str(task.status)] += 1
return counter
def allocate(self, manager=None):
"""
This function is called once we have completed the initialization
of the :class:`Work`. It sets the manager of each task (if not already done)
and defines the working directories of the tasks.
Args:
manager: :class:`TaskManager` object or None
"""
for i, task in enumerate(self):
if not hasattr(task, "manager"):
# Set the manager
# Use the one provided in input else the one of the work/flow.
if manager is not None:
task.set_manager(manager)
else:
# Look first in work and then in the flow.
if hasattr(self, "manager"):
task.set_manager(self.manager)
else:
task.set_manager(self.flow.manager)
task_workdir = os.path.join(self.workdir, "t" + str(i))
if not hasattr(task, "workdir"):
task.set_workdir(task_workdir)
else:
if task.workdir != task_workdir:
raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))
def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
"""
Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies.
Args:
obj: :class:`AbinitInput` instance.
deps: Dictionary specifying the dependency of this node.
None means that this obj has no dependency.
required_files: List of strings with the path of the files used by the task.
Note that the files must exist when the task is registered.
Use the standard approach based on Works, Tasks and deps
if the files will be produced in the future.
manager:
The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use
the `TaskManager` specified during the creation of the :class:`Work`.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
:class:`Task` object
"""
task_workdir = None
if hasattr(self, "workdir"):
task_workdir = os.path.join(self.workdir, "t" + str(len(self)))
if isinstance(obj, Task):
task = obj
else:
# Set the class
if task_class is None:
task_class = AbinitTask
task = task_class.from_input(obj, task_workdir, manager)
self._tasks.append(task)
# Handle possible dependencies.
if deps is not None:
deps = [Dependency(node, exts) for node, exts in deps.items()]
task.add_deps(deps)
# Handle possible required files.
if required_files is not None:
task.add_required_files(required_files)
return task
# Needed by NodeContainer
register_task = register
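# Usage sketch (hypothetical AbinitInput objects; the deps syntax is the one
# documented in register above): a NSCF task consuming the density file
# produced by a SCF task.
#
#   work = Work()
#   scf_task = work.register_scf_task(scf_input)
#   nscf_task = work.register_nscf_task(nscf_input, deps={scf_task: "DEN"})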
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the working directory."""
return os.path.join(self.workdir, filename)
def setup(self, *args, **kwargs):
"""
Method called before running the calculations.
The default implementation is empty.
"""
def build(self, *args, **kwargs):
"""Creates the top level directory."""
# Create the directories of the work.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Build dirs and files of each task.
for task in self:
task.build(*args, **kwargs)
# Connect signals within the work.
self.connect_signals()
@property
def status(self):
"""
Returns the status of the work i.e. the minimum of the status of the tasks.
"""
return self.get_all_status(only_min=True)
def get_all_status(self, only_min=False):
"""
Returns a list with the status of the tasks in self.
Args:
only_min: If True, the minimum of the status is returned.
"""
if len(self) == 0:
# The work will be created in the future.
if only_min:
return self.S_INIT
else:
return [self.S_INIT]
self.check_status()
status_list = [task.status for task in self]
if only_min:
return min(status_list)
else:
return status_list
def check_status(self):
"""Check the status of the tasks."""
# Recompute the status of the tasks
# Ignore OK and LOCKED tasks.
for task in self:
if task.status in (task.S_OK, task.S_LOCKED): continue
task.check_status()
# Take into account possible dependencies. Use a list instead of generators
for task in self:
if task.status == task.S_LOCKED: continue
if task.status < task.S_SUB and all(status == task.S_OK for status in task.deps_status):
task.set_status(task.S_READY, "Status set to Ready")
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by `|`.
Files matching one of the regular expressions will be preserved.
example: exclude_wildard="*.nc|*.txt" preserves all the files
whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
path = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(path)
def rm_indatadir(self):
"""Remove all the indata directories."""
for task in self:
task.rm_indatadir()
def rm_outdatadir(self):
"""Remove all the indata directories."""
for task in self:
task.rm_outatadir()
def rm_tmpdatadir(self):
"""Remove all the tmpdata directories."""
for task in self:
task.rm_tmpdatadir()
def move(self, dest, isabspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
By default, dest is located in the parent directory of self.workdir, use isabspath=True
to specify an absolute path.
"""
if not isabspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def submit_tasks(self, wait=False):
"""
Submits the tasks in self and waits for their completion if requested.
TODO: change name.
"""
for task in self:
task.start()
if wait:
for task in self: task.wait()
def start(self, *args, **kwargs):
"""
Start the work. Calls build and _setup first, then submit the tasks.
Non-blocking call unless wait is set to True
"""
wait = kwargs.pop("wait", False)
# Initial setup
self._setup(*args, **kwargs)
# Build dirs and files.
self.build(*args, **kwargs)
# Submit tasks (does not block)
self.submit_tasks(wait=wait)
def read_etotals(self, unit="Ha"):
"""
Reads the total energy from the GSR file produced by the task.
Return a numpy array with the total energies in Hartree
The array element is set to np.inf if an exception is raised while reading the GSR file.
"""
if not self.all_done:
raise self.Error("Some task is still in running/submitted state")
etotals = []
for task in self:
# Open the GSR file and read etotal (Hartree)
gsr_path = task.outdir.has_abiext("GSR")
etot = np.inf
if gsr_path:
with ETSF_Reader(gsr_path) as r:
etot = r.read_value("etotal")
etotals.append(etot)
return EnergyArray(etotals, "Ha").to(unit)
def parse_timers(self):
"""
Parse the TIMER section reported in the ABINIT output files.
Returns:
:class:`AbinitTimerParser` object
"""
filenames = list(filter(os.path.exists, [task.output_file.path for task in self]))
parser = AbinitTimerParser()
parser.parse(filenames)
return parser
class BandStructureWork(Work):
"""Work for band structure calculations."""
def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run
nscf_input: Input for the NSCF run defining the band structure calculation.
dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
workdir: Working directory.
manager: :class:`TaskManager` object.
"""
super(BandStructureWork, self).__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Register the NSCF run and its dependency.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Add DOS computation(s) if requested.
self.dos_tasks = []
if dos_inputs is not None:
if not isinstance(dos_inputs, (list, tuple)):
dos_inputs = [dos_inputs]
for dos_input in dos_inputs:
dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"})
self.dos_tasks.append(dos_task)
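# Construction sketch (scf_input, nscf_input and dos_input are assumed to be
# prebuilt AbinitInput objects):
#
#   work = BandStructureWork(scf_input, nscf_input, dos_inputs=[dos_input])
#
# After this call work.scf_task, work.nscf_task and work.dos_tasks are
# registered, with the NSCF/DOS tasks depending on the SCF density ("DEN").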
def plot_ebands(self, **kwargs):
"""
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure
"""
with self.nscf_task.open_gsr() as gsr:
return gsr.ebands.plot(**kwargs)
def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the band structure and the DOS.
Args:
dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.
Returns:
`matplotlib` figure.
"""
with self.nscf_task.open_gsr() as gsr:
gs_ebands = gsr.ebands
with self.dos_tasks[dos_pos].open_gsr() as gsr:
dos_ebands = gsr.ebands
edos = dos_ebands.get_edos(method=method, step=step, width=width)
return gs_ebands.plot_with_edos(edos, **kwargs)
def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the band structure and the DOS.
Args:
dos_pos: Index of the task from which the DOS should be obtained.
None if all DOSes should be displayed. Accepts integer or list of integers.
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot` method to customize the plot.
Returns:
`matplotlib` figure.
"""
if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos]
from abipy.electrons.ebands import ElectronDosPlotter
plotter = ElectronDosPlotter()
for i, task in enumerate(self.dos_tasks):
if dos_pos is not None and i not in dos_pos: continue
with task.open_gsr() as gsr:
edos = gsr.ebands.get_edos(method=method, step=step, width=width)
ngkpt = task.get_inpvar("ngkpt")
plotter.add_edos("ngkpt %s" % str(ngkpt), edos)
return plotter.combiplot(**kwargs)
class RelaxWork(Work):
"""
Work for structural relaxations. The first task relaxes the atomic position
while keeping the unit cell parameters fixed. The second task uses the final
structure to perform a structural relaxation in which both the atomic positions
and the lattice parameters are optimized.
"""
def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None):
"""
Args:
ion_input: Input for the relaxation of the ions (cell is fixed)
ioncell_input: Input for the relaxation of the ions and the unit cell.
workdir: Working directory.
manager: :class:`TaskManager` object.
"""
super(RelaxWork, self).__init__(workdir=workdir, manager=manager)
self.ion_task = self.register_relax_task(ion_input)
# Note:
# 1) It would be nice to restart from the WFK file but ABINIT crashes due to the
# different unit cell parameters if paral_kgb == 1
#paral_kgb = ion_input[0]["paral_kgb"]
#if paral_kgb == 1:
#deps = {self.ion_task: "WFK"} # --> FIXME: Problem in rwwf
#deps = {self.ion_task: "DEN"}
deps = None
self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps)
# Lock ioncell_task as ion_task should communicate to ioncell_task that
# the calculation is OK and pass the final structure.
self.ioncell_task.lock(source_node=self)
self.transfer_done = False
self.target_dilatmx = target_dilatmx
#@check_spectator
def on_ok(self, sender):
"""
This callback is called when one task reaches status S_OK.
If sender == self.ion_task, we update the initial structure
used by self.ioncell_task and we unlock it so that the job can be submitted.
"""
logger.debug("in on_ok with sender %s" % sender)
if sender == self.ion_task and not self.transfer_done:
# Get the relaxed structure from ion_task
ion_structure = self.ion_task.get_final_structure()
# Transfer it to the ioncell task (we do it only once).
self.ioncell_task._change_structure(ion_structure)
self.transfer_done = True
# Unlock ioncell_task so that we can submit it.
self.ioncell_task.unlock(source_node=self)
elif sender == self.ioncell_task and self.target_dilatmx:
actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
if self.target_dilatmx < actual_dilatmx:
self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
logger.info('Converging dilatmx. Value reduced from {} to {}.'
.format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))
self.ioncell_task.reset_from_scratch()
return super(RelaxWork, self).on_ok(sender)
def plot_ion_relaxation(self, **kwargs):
"""
Plot the history of the ion relaxation.
kwargs are passed to the plot method of :class:`HistFile`
Return `matplotlib` figure or None if hist file is not found.
"""
with self.ion_task.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
def plot_ioncell_relaxation(self, **kwargs):
"""
Plot the history of the ion-cell relaxation.
kwargs are passed to the plot method of :class:`HistFile`
Return `matplotlib` figure or None if hist file is not found.
"""
with self.ioncell_task.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
class G0W0Work(Work):
"""
Work for general G0W0 calculations.
All input can be either single inputs or lists of inputs
"""
def __init__(self, scf_inputs, nscf_inputs, scr_inputs, sigma_inputs,
workdir=None, manager=None):
"""
Args:
scf_inputs: Input(s) for the SCF run; if a list is given, all inputs are registered
but only the last one is linked to the NSCF run (used for convergence studies on the KS band gap).
nscf_inputs: Input(s) for the NSCF run; if a list is given, all inputs are registered
but only the last one is linked (e.g. additional DOS and BANDS calculations).
scr_inputs: Input for the screening run.
sigma_inputs: List of :class:`AbinitInput` for the self-energy run.
If scr and sigma are lists of the same length, every sigma gets its own screening.
If there is only one screening, all sigma inputs are linked to it.
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
"""
super(G0W0Work, self).__init__(workdir=workdir, manager=manager)
spread_scr = (isinstance(sigma_inputs, (list, tuple)) and
isinstance(scr_inputs, (list, tuple)) and
len(sigma_inputs) == len(scr_inputs))
#print("spread_scr", spread_scr)
self.sigma_tasks = []
# Register the GS-SCF run.
# register all scf_inputs but link the nscf only the last scf in the list
# multiple scf_inputs can be provided to perform convergence studies
if isinstance(scf_inputs, (list, tuple)):
for scf_input in scf_inputs:
self.scf_task = self.register_scf_task(scf_input)
else:
self.scf_task = self.register_scf_task(scf_inputs)
# Register the NSCF run (s).
if isinstance(nscf_inputs, (list, tuple)):
for nscf_input in nscf_inputs:
self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
else:
self.nscf_task = nscf_task = self.register_nscf_task(nscf_inputs, deps={self.scf_task: "DEN"})
# Register the SCR and SIGMA run(s).
if spread_scr:
for scr_input, sigma_input in zip(scr_inputs, sigma_inputs):
scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"})
sigma_task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(sigma_task)
else:
# Sigma work(s) connected to the same screening.
scr_task = self.register_scr_task(scr_inputs, deps={nscf_task: "WFK"})
if isinstance(sigma_inputs, (list, tuple)):
for inp in sigma_inputs:
task = self.register_sigma_task(inp, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(task)
else:
task = self.register_sigma_task(sigma_inputs, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(task)
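# Wiring sketch (illustrative summary of the logic above): with one screening
# shared by several self-energy inputs the dependency graph is
#
#   scf -> nscf -> scr -> sigma_1 ... sigma_n   (each sigma also needs nscf's WFK)
#
# whereas scr/sigma lists of equal length produce one scr -> sigma pair per entry.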
class SigmaConvWork(Work):
"""
Work for self-energy convergence studies.
"""
def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
"""
Args:
wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file.
scr_node: The node who has produced the SCR file or filepath pointing to the SCR file.
sigma_inputs: List of :class:`AbinitInput` for the self-energy runs.
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
"""
# Cast to node instances.
wfk_node, scr_node = Node.as_node(wfk_node), Node.as_node(scr_node)
super(SigmaConvWork, self).__init__(workdir=workdir, manager=manager)
# Register the SIGMA runs.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
for sigma_input in sigma_inputs:
self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"})
class BseMdfWork(Work):
"""
Work for simple BSE calculations in which the self-energy corrections
are approximated by the scissors operator and the screening is modeled
with the model dielectric function.
"""
def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run.
nscf_input: Input for the NSCF run.
bse_inputs: List of Inputs for the BSE run.
workdir: Working directory of the calculation.
manager: :class:`TaskManager`.
"""
super(BseMdfWork, self).__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Construct the input for the NSCF run.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Construct the input(s) for the BSE run.
if not isinstance(bse_inputs, (list, tuple)):
bse_inputs = [bse_inputs]
for bse_input in bse_inputs:
self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"})
def get_mdf_robot(self):
"""Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files."""
from abilab.robots import MdfRobot
robot = MdfRobot()
for task in self[2:]:
mdf_path = task.outdir.has_abiext(robot.EXT)
if mdf_path:
robot.add_file(str(task), mdf_path)
return robot
#def plot_conv_mdf(self, **kwargs)
# with self.get_mdf_robot() as robot:
# robot.get_mdf_plotter()
# plotter.plot(**kwargs)
class QptdmWork(Work):
"""
This work parallelizes the calculation of the q-points of the screening.
It also provides the callback `on_all_ok` that calls mrgscr to merge
all the partial screening files produced.
"""
def create_tasks(self, wfk_file, scr_input):
"""
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
"""
assert len(self) == 0
wfk_file = self.wfk_file = os.path.abspath(wfk_file)
# Build a temporary work in the tmpdir that will use a shell manager
# to run ABINIT in order to get the list of q-points for the screening.
shell_manager = self.manager.to_shell_manager(mpi_procs=1)
w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
fake_input = scr_input.deepcopy()
fake_task = w.register(fake_input)
w.allocate()
w.build()
# Create the symbolic link and add the magic value
# nqpdm = -1 to the input to get the list of q-points.
fake_task.inlink_file(wfk_file)
fake_task.set_vars({"nqptdm": -1})
fake_task.start_and_wait()
# Parse the section with the q-points
with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
qpoints = reader.read_value("reduced_coordinates_of_kpoints")
#print("qpoints)
#w.rmtree()
# Now we can register the task for the different q-points
for qpoint in qpoints:
qptdm_input = scr_input.deepcopy()
qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
new_task = self.register_scr_task(qptdm_input, manager=self.manager)
# Add the garbage collector.
if self.flow.gc is not None:
new_task.set_gc(self.flow.gc)
self.allocate()
def merge_scrfiles(self, remove_scrfiles=True):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
If remove_scrfiles is True, the partial SCR files are removed after the merge.
"""
scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self]))
logger.debug("will call mrgscr to merge %s:\n" % str(scr_files))
assert len(scr_files) == len(self)
mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1)
final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out")
if remove_scrfiles:
for scr_file in scr_files:
try:
os.remove(scr_file)
except IOError:
pass
return final_scr
#@check_spectator
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
"""
final_scr = self.merge_scrfiles()
return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
@deprecated(message="This class is deprecated and will be removed in pymatgen 4.0. Use PhononWork")
def build_oneshot_phononwork(scf_input, ph_inputs, workdir=None, manager=None, work_class=None):
"""
Returns a work for the computation of phonon frequencies
ph_inputs is a list of input for Phonon calculation in which all the independent perturbations
are explicitly computed i.e.
* rfdir 1 1 1
* rfatpol 1 natom
.. warning::
This work is mainly used for simple calculations, e.g. convergence studies.
Use :class:`PhononWork` for better efficiency.
"""
work_class = OneShotPhononWork if work_class is None else work_class
work = work_class(workdir=workdir, manager=manager)
scf_task = work.register_scf_task(scf_input)
ph_inputs = [ph_inputs] if not isinstance(ph_inputs, (list, tuple)) else ph_inputs
for phinp in ph_inputs:
# Check rfdir and rfatpol.
rfdir = np.array(phinp.get("rfdir", [0, 0, 0]))
if len(rfdir) != 3 or any(rfdir != (1, 1, 1)):
raise ValueError("Expecting rfdir == (1, 1, 1), got %s" % rfdir)
rfatpol = np.array(phinp.get("rfatpol", [1, 1]))
if len(rfatpol) != 2 or any(rfatpol != (1, len(phinp.structure))):
raise ValueError("Expecting rfatpol == (1, natom), got %s" % rfatpol)
# cannot use PhononTask here because the Task is not able to deal with multiple phonon calculations
ph_task = work.register(phinp, deps={scf_task: "WFK"})
return work
#class OneShotPhononWork(Work):
# """
# Simple and very inefficient work for the computation of the phonon frequencies
# It consists of a GS task and a DFPT calculations for all the independent perturbations.
# The main advantage is that one has direct access to the phonon frequencies that
# can be computed at the end of the second task without having to call anaddb.
#
# Use ``build_oneshot_phononwork`` to construct this work from the input files.
# """
# @deprecated(message="This class is deprecated and will be removed in pymatgen 4.0. Use PhononWork")
# def read_phonons(self):
# """
# Read phonon frequencies from the output file.
#
# Return:
# List of namedtuples. Each `namedtuple` has the following attributes:
#
# - qpt: ndarray with the q-point in reduced coordinates.
# - freqs: ndarray with 3 x Natom phonon frequencies in meV
# """
# #
# # Phonon wavevector (reduced coordinates) : 0.00000 0.00000 0.00000
# # Phonon energies in Hartree :
# # 1.089934E-04 4.990512E-04 1.239177E-03 1.572715E-03 1.576801E-03
# # 1.579326E-03
# # Phonon frequencies in cm-1 :
# # - 2.392128E+01 1.095291E+02 2.719679E+02 3.451711E+02 3.460677E+02
# # - 3.466221E+02
# BEGIN = " Phonon wavevector (reduced coordinates) :"
# END = " Phonon frequencies in cm-1 :"
#
# ph_tasks, qpts, phfreqs = self[1:], [], []
# for task in ph_tasks:
#
# # Parse output file.
# with open(task.output_file.path, "r") as fh:
# qpt, inside = None, 0
# for line in fh:
# if line.startswith(BEGIN):
# qpts.append([float(s) for s in line[len(BEGIN):].split()])
# inside, omegas = 1, []
# elif line.startswith(END):
# break
# elif inside:
# inside += 1
# if inside > 2:
# omegas.extend((float(s) for s in line.split()))
# else:
# raise ValueError("Cannot find %s in file %s" % (END, task.output_file.path))
#
# phfreqs.append(omegas)
#
# # Use namedtuple to store q-point and frequencies in meV
# phonon = collections.namedtuple("phonon", "qpt freqs")
# return [phonon(qpt=qpt, freqs=freqs_meV) for qpt, freqs_meV in zip(qpts, EnergyArray(phfreqs, "Ha").to("meV") )]
#
# def get_results(self, **kwargs):
# results = super(OneShotPhononWork, self).get_results()
# phonons = self.read_phonons()
# results.update(phonons=phonons)
# return results
class MergeDdb(object):
"""Mixin class for Works that have to merge the DDB files produced by the tasks."""
def merge_ddb_files(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Returns:
path to the output DDB file
"""
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in self \
if isinstance(task, DfptTask)]))
self.history.info("Will call mrgddb to merge %s:\n" % str(ddb_files))
# DDB files are always produced so this should never happen!
if not ddb_files:
raise RuntimeError("Cannot find any DDB file to merge by the tasks of %s" % self)
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
if len(ddb_files) == 1:
# Avoid the merge. Just copy the DDB file to the outdir of the work.
shutil.copy(ddb_files[0], out_ddb)
else:
# Call mrgddb
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
return out_ddb
class PhononWork(Work, MergeDdb):
"""
This work usually consists of one GS + nirred Phonon tasks where nirred is
the number of irreducible perturbations for a given q-point.
It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced
"""
@classmethod
def from_scf_task(cls, scf_task, qpoints, is_ngqpt=False, tolerance=None, manager=None):
"""
Construct a `PhononWork` from a :class:`ScfTask` object.
The input file for phonons is automatically generated from the input of the ScfTask.
Each phonon task depends on the WFK file produced by scf_task.
Args:
scf_task: ScfTask object.
qpoints: q-points in reduced coordinates. Accepts single q-point, list of q-points
or three integers defining the q-mesh if `is_ngqpt`.
is_ngqpt: True if `qpoints` should be interpreted as divisions instead of q-points.
tolerance: dict {varname: value} with the tolerance to be used in the DFPT run.
Defaults to {"tolvrs": 1.0e-10}.
manager: :class:`TaskManager` object.
"""
if not isinstance(scf_task, ScfTask):
raise TypeError("task %s does not inherit from ScfTask" % scf_task)
if is_ngqpt:
qpoints = scf_task.input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
qpoints = np.reshape(qpoints, (-1, 3))
new = cls(manager=manager)
for qpt in qpoints:
multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
for ph_inp in multi:
new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})
return new
@classmethod
def from_scf_input(cls, scf_input, qpoints, is_ngqpt=False, tolerance=None, manager=None):
"""
Similar to `from_scf_task`, the difference is that this method requires
an input for SCF calculation instead of a ScfTask. All the tasks (Scf + Phonon)
are packed in a single Work whereas in the previous case we usually have multiple works.
"""
if is_ngqpt:
qpoints = scf_input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
qpoints = np.reshape(qpoints, (-1, 3))
new = cls(manager=manager)
scf_task = new.register_scf_task(scf_input)
for qpt in qpoints:
multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
for ph_inp in multi:
new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})
return new
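# Usage sketch (scf_input is a hypothetical ground-state AbinitInput):
#
#   work = PhononWork.from_scf_input(scf_input, qpoints=[4, 4, 4], is_ngqpt=True)
#
# This registers one ScfTask plus one PhononTask per irreducible perturbation
# of every q-point in the 4x4x4 IBZ mesh, each depending on the SCF WFK file.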
def merge_pot1_files(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgdvdb` in sequential on the local machine to produce
the final DVDB file in the outdir of the `Work`.
Returns:
path to the output DVDB file. None if no DFPT POT file is found.
"""
pot1_files = []
for task in self:
if not isinstance(task, DfptTask): continue
pot1_files.extend(task.outdir.list_filepaths(wildcard="*_POT*"))
# prtpot=0 disables the output of the DFPT POT files so an empty list is not fatal here.
if not pot1_files: return None
self.history.info("Will call mrgdvdb to merge %s:\n" % str(pot1_files))
# Final DDB file will be produced in the outdir of the work.
out_dvdb = self.outdir.path_in("out_DVDB")
if len(pot1_files) == 1:
# Avoid the merge. Just copy the POT file to the outdir of the work.
shutil.copy(pot1_files[0], out_dvdb)
else:
mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb)
return out_dvdb
#@check_spectator
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
# Merge DVDB files.
out_dvdb = self.merge_pot1_files()
results = self.Results(node=self, returncode=0, message="DDB merge done")
results.register_gridfs_files(DDB=(out_ddb, "t"))
return results
class BecWork(Work, MergeDdb):
"""
Work for the computation of the Born effective charges.
This work consists of DDK tasks and phonon + electric field perturbation
It provides the callback method (on_all_ok) that calls mrgddb to merge the
partial DDB files produced by the work.
"""
@classmethod
def from_scf_task(cls, scf_task, ddk_tolerance=None):
"""Build a BecWork from a ground-state task."""
if not isinstance(scf_task, ScfTask):
raise TypeError("task %s does not inherit from GsTask" % scf_task)
new = cls() #manager=scf_task.manager)
# DDK calculations
multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
ddk_tasks = []
for ddk_inp in multi_ddk:
ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
ddk_tasks.append(ddk_task)
# Build the list of inputs for electric field perturbation and phonons
# Each bec task is connected to all the previous DDK task and to the scf_task.
bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
bec_deps.update({scf_task: "WFK"})
bec_inputs = scf_task.input.make_bec_inputs() #tolerance=efile
for bec_inp in bec_inputs:
new.register_bec_task(bec_inp, deps=bec_deps)
return new
def on_all_ok(self):
"""
This method is called when all the tasks reach S_OK.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
results = self.Results(node=self, returncode=0, message="DDB merge done")
results.register_gridfs_files(DDB=(out_ddb, "t"))
return results
class DteWork(Work, MergeDdb):
"""
Work for the computation of the third derivative of the energy.
This work consists of DDK tasks and electric field perturbation.
It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced
"""
@classmethod
def from_scf_task(cls, scf_task, ddk_tolerance=None):
"""Build a DteWork from a ground-state task."""
if not isinstance(scf_task, ScfTask):
raise TypeError("task %s does not inherit from GsTask" % scf_task)
new = cls() #manager=scf_task.manager)
# DDK calculations
multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
ddk_tasks = []
for ddk_inp in multi_ddk:
ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
ddk_tasks.append(ddk_task)
# Build the list of inputs for electric field perturbation
# Each task is connected to all the previous DDK, DDE task and to the scf_task.
multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)
# To compute the nonlinear coefficients all the directions of the perturbation
# have to be taken in consideration
# DDE calculations
dde_tasks = []
dde_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
dde_deps.update({scf_task: "WFK"})
for dde_inp in multi_dde:
dde_task = new.register_dde_task(dde_inp, deps=dde_deps)
dde_tasks.append(dde_task)
#DTE calculations
dte_deps = {scf_task: "WFK DEN"}
dte_deps.update({dde_task: "1WF 1DEN" for dde_task in dde_tasks})
multi_dte = scf_task.input.make_dte_inputs()
dte_tasks = []
for dte_inp in multi_dte:
dte_task = new.register_dte_task(dte_inp, deps=dte_deps)
dte_tasks.append(dte_task)
return new
def on_all_ok(self):
"""
This method is called when all the tasks reach S_OK.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
results = self.Results(node=self, returncode=0, message="DDB merge done")
results.register_gridfs_files(DDB=(out_ddb, "t"))
return results
|
setten/pymatgen
|
pymatgen/io/abinit/works.py
|
Python
|
mit
| 56,981
|
[
"ABINIT",
"Gaussian",
"NetCDF",
"pymatgen"
] |
9bb32f541c64b4dd18c9c81dc8db1f588d5d93b8da6a4f30efdc9c7339975cba
|
''' -- imports from installed packages -- '''
import json
import datetime
''' -- imports from django -- '''
from django.shortcuts import render_to_response, render
from django.template import RequestContext
from django.template import Context
from django.template.defaultfilters import slugify
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.template.loader import get_template
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required
''' -- imports from django_mongokit -- '''
''' -- imports from gstudio -- '''
from gnowsys_ndf.ndf.models import GSystemType, GSystem,Node
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.methods import get_forum_repl_type,forum_notification_status
from gnowsys_ndf.ndf.templatetags.ndf_tags import get_forum_twists,get_all_replies
from gnowsys_ndf.ndf.views.methods import set_all_urls,check_delete,get_execution_time
from gnowsys_ndf.settings import GAPPS
from gnowsys_ndf.ndf.views.notify import set_notif_val,get_userobject
from gnowsys_ndf.ndf.org2any import org2html
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
# ##########################################################################
forum_st = node_collection.one({'$and':[{'_type':'GSystemType'},{'name':GAPPS[5]}]})
start_time = node_collection.one({'$and':[{'_type':'AttributeType'},{'name':'start_time'}]})
end_time = node_collection.one({'$and':[{'_type':'AttributeType'},{'name':'end_time'}]})
reply_st = node_collection.one({'$and':[{'_type':'GSystemType'},{'name':'Reply'}]})
twist_st = node_collection.one({'$and':[{'_type':'GSystemType'},{'name':'Twist'}]})
sitename=Site.objects.all()[0].name.__str__()
app = forum_st
@get_execution_time
def forum(request, group_id, node_id=None):
'''
Method to list all the available forums and to return forum-search-query result.
'''
# method to convert group_id to ObjectId if it is groupname
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.find_one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
auth = node_collection.find_one({'_type': 'Author', 'name': unicode(group_id) })
if auth:
group_id=str(auth._id)
else :
pass
# getting Forum GSystem's ObjectId
if node_id is None:
node_ins = node_collection.find_one({'_type':"GSystemType", "name":"Forum"})
if node_ins:
node_id = str(node_ins._id)
if request.method == "POST":
# Forum search view
title = forum_st.name
search_field = request.POST['search_field']
existing_forums = node_collection.find({'member_of': {'$all': [ObjectId(forum_st._id)]},
'$or': [{'name': {'$regex': search_field, '$options': 'i'}},
{'tags': {'$regex':search_field, '$options': 'i'}}],
'group_set': {'$all': [ObjectId(group_id)]},
'status':{'$nin':['HIDDEN']}
}).sort('last_update', -1)
return render_to_response("ndf/forum.html",
{'title': title,
'searching': True, 'query': search_field,
'existing_forums': existing_forums, 'groupid':group_id, 'group_id':group_id
},
context_instance=RequestContext(request)
)
elif forum_st._id == ObjectId(node_id):
# Forum list view
existing_forums = node_collection.find({'member_of': {'$all': [ObjectId(node_id)]}, 'group_set': {'$all': [ObjectId(group_id)]},
'status':{'$nin':['HIDDEN']}
}).sort('last_update', -1)
forum_detail_list = []
for each in existing_forums:
temp_forum = {}
temp_forum['name'] = each.name
temp_forum['created_at'] = each.created_at
temp_forum['tags'] = each.tags
temp_forum['member_of_names_list'] = each.member_of_names_list
temp_forum['user_details_dict'] = each.user_details_dict
temp_forum['html_content'] = each.html_content
temp_forum['contributors'] = each.contributors
temp_forum['id'] = each._id
temp_forum['threads'] = node_collection.find({'$and':[{'_type':'GSystem'},{'prior_node':ObjectId(each._id)}],
'status':{'$nin':['HIDDEN']}
}).count()
forum_detail_list.append(temp_forum)
variables = RequestContext(request,{'existing_forums': forum_detail_list,'groupid': group_id, 'group_id': group_id})
return render_to_response("ndf/forum.html",variables)
@login_required
@get_execution_time
def create_forum(request,group_id):
'''
Method to create a forum and retrieve all existing forums.
'''
# method to convert group_id to ObjectId if it is groupname
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
# getting all the values from submitted form
if request.method == "POST":
colg = node_collection.one({'_id':ObjectId(group_id)}) # getting group ObjectId
colf = node_collection.collection.GSystem() # creating new/empty GSystem object
name = unicode(request.POST.get('forum_name',"")).strip() # forum name
colf.name = name
content_org = request.POST.get('content_org',"") # forum content
if content_org:
colf.content_org = unicode(content_org)
usrname = request.user.username
filename = slugify(name) + "-" + usrname + "-"
colf.content = org2html(content_org, file_prefix=filename)
usrid = int(request.user.id)
usrname = unicode(request.user.username)
colf.created_by=usrid
colf.modified_by = usrid
if usrid not in colf.contributors:
colf.contributors.append(usrid)
colf.group_set.append(colg._id)
# appending user group's ObjectId in group_set
user_group_obj = node_collection.one({'$and':[{'_type':u'Group'},{'name':usrname}]})
if user_group_obj:
if user_group_obj._id not in colf.group_set:
colf.group_set.append(user_group_obj._id)
colf.member_of.append(forum_st._id)
colf.access_policy = u"PUBLIC"
colf.url = set_all_urls(colf.member_of)
### currently timed forum feature is not in use ###
# sdate=request.POST.get('sdate',"")
# shrs= request.POST.get('shrs',"")
# smts= request.POST.get('smts',"")
# edate= request.POST.get('edate',"")
# ehrs= request.POST.get('ehrs',"")
# emts=request.POST.get('emts',"")
# start_dt={}
# end_dt={}
# if not shrs:
# shrs=0
# if not smts:
# smts=0
# if sdate:
# sdate1=sdate.split("/")
# st_date = datetime.datetime(int(sdate1[2]),int(sdate1[0]),int(sdate1[1]),int(shrs),int(smts))
# start_dt[start_time.name]=st_date
# if not ehrs:
# ehrs=0
# if not emts:
# emts=0
# if edate:
# edate1=edate.split("/")
# en_date= datetime.datetime(int(edate1[2]),int(edate1[0]),int(edate1[1]),int(ehrs),int(emts))
# end_dt[end_time.name]=en_date
# colf.attribute_set.append(start_dt)
# colf.attribute_set.append(end_dt)
colf.save()
'''Code to send notification to all members of the group except those whose notification preference is turned OFF'''
link="http://"+sitename+"/"+str(colg._id)+"/forum/"+str(colf._id)
for each in colg.author_set:
bx=User.objects.filter(id=each)
if bx:
bx=User.objects.get(id=each)
else:
continue
activity="Added forum"
msg=usrname+" has added a forum in the group -'"+colg.name+"'\n"+"Please visit "+link+" to see the forum."
if bx:
auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
if colg._id and auth:
no_check=forum_notification_status(colg._id,auth._id)
else:
no_check=True
if no_check:
ret = set_notif_val(request,colg._id,msg,activity,bx)
# returning response to ndf/forumdetails.html
return HttpResponseRedirect(reverse('show', kwargs={'group_id':group_id,'forum_id': colf._id }))
# variables=RequestContext(request,{'forum':colf})
# return render_to_response("ndf/forumdetails.html",variables)
# getting all the forum GSystems to provide autocomplete/intellisense of forum names
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(forum_st._id),'group_set': ObjectId(group_id) })
nodes_list = []
for each in available_nodes:
nodes_list.append(str((each.name).strip().lower()))
return render_to_response("ndf/create_forum.html",{'group_id':group_id,'groupid':group_id, 'nodes_list': nodes_list},RequestContext(request))
@login_required
@get_execution_time
def edit_forum(request,group_id,forum_id):
'''
Method to edit an existing forum.
'''
forum=node_collection.one({'_id':ObjectId(forum_id)})
# method to convert group_id to ObjectId if it is groupname
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
# getting all the values from submitted form
if request.method == "POST":
colg = node_collection.one({'_id':ObjectId(group_id)}) # getting group ObjectId
colf = node_collection.one({'_id':ObjectId(forum_id)}) # creating new/empty GSystem object
name = unicode(request.POST.get('forum_name',"")).strip() # forum name
colf.name = name
content_org = request.POST.get('content_org',"") # forum content
if content_org:
colf.content_org = unicode(content_org)
usrname = request.user.username
filename = slugify(name) + "-" + usrname + "-"
colf.content = org2html(content_org, file_prefix=filename)
usrid = int(request.user.id)
usrname = unicode(request.user.username)
colf.modified_by = usrid
if usrid not in colf.contributors:
colf.contributors.append(usrid)
colf.access_policy = u"PUBLIC"
colf.url = set_all_urls(colf.member_of)
### currently timed forum feature is not in use ###
# sdate=request.POST.get('sdate',"")
# shrs= request.POST.get('shrs',"")
# smts= request.POST.get('smts',"")
# edate= request.POST.get('edate',"")
# ehrs= request.POST.get('ehrs',"")
# emts=request.POST.get('emts',"")
# start_dt={}
# end_dt={}
# if not shrs:
# shrs=0
# if not smts:
# smts=0
# if sdate:
# sdate1=sdate.split("/")
# st_date = datetime.datetime(int(sdate1[2]),int(sdate1[0]),int(sdate1[1]),int(shrs),int(smts))
# start_dt[start_time.name]=st_date
# if not ehrs:
# ehrs=0
# if not emts:
# emts=0
# if edate:
# edate1=edate.split("/")
# en_date= datetime.datetime(int(edate1[2]),int(edate1[0]),int(edate1[1]),int(ehrs),int(emts))
# end_dt[end_time.name]=en_date
# colf.attribute_set.append(start_dt)
# colf.attribute_set.append(end_dt)
colf.save()
'''Code to send notification to all members of the group except those whose notification preference is turned OFF'''
link="http://"+sitename+"/"+str(colg._id)+"/forum/"+str(colf._id)
for each in colg.author_set:
bx=User.objects.get(id=each)
activity="Edited forum"
msg=usrname+" has edited forum -" +colf.name+" in the group -'"+colg.name+"'\n"+"Please visit "+link+" to see the forum."
if bx:
auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
if colg._id and auth:
no_check=forum_notification_status(colg._id,auth._id)
else:
no_check=True
if no_check:
ret = set_notif_val(request,colg._id,msg,activity,bx)
# returning response to ndf/forumdetails.html
return HttpResponseRedirect(reverse('show', kwargs={'group_id':group_id,'forum_id': colf._id }))
# variables=RequestContext(request,{'forum':colf})
# return render_to_response("ndf/forumdetails.html",variables)
    # getting all the GSystem nodes of the forum to provide autocomplete/intellisense of forum names
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(forum_st._id),'group_set': ObjectId(group_id) })
nodes_list = []
for each in available_nodes:
nodes_list.append(str((each.name).strip().lower()))
return render_to_response("ndf/edit_forum.html",{'group_id':group_id,'groupid':group_id, 'nodes_list': nodes_list,'forum':forum},RequestContext(request))
@get_execution_time
def display_forum(request,group_id,forum_id):
forum = node_collection.one({'_id': ObjectId(forum_id)})
usrname = User.objects.get(id=forum.created_by).username
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
forum_object = node_collection.one({'_id': ObjectId(forum_id)})
if forum_object._type == "GSystemType":
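        # NOTE: the local variable `forum` (the node fetched above) shadows the
        # module-level `forum` view, so this call hits the node object rather
        # than the view function.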
return forum(request, group_id, forum_id)
th_all=get_forum_twists(forum)
if th_all:
th_count=len(list(th_all))
else:
th_count=0
variables = RequestContext(request,{
'forum':forum,
'groupid':group_id,'group_id':group_id,
'forum_created_by':usrname,
'thread_count':th_count,
})
return render_to_response("ndf/forumdetails.html",variables)
@get_execution_time
def display_thread(request,group_id, thread_id, forum_id=None):
'''
    Method to display a thread and its content
'''
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
# auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
try:
thread = node_collection.one({'_id': ObjectId(thread_id)})
rep_lst=get_all_replies(thread)
lst_rep=list(rep_lst)
if lst_rep:
reply_count=len(lst_rep)
else:
reply_count=0
print "reply count=",reply_count
forum = ""
for each in thread.prior_node:
forum=node_collection.one({'$and':[{'member_of': {'$all': [forum_st._id]}},{'_id':ObjectId(each)}]})
if forum:
usrname = User.objects.get(id=forum.created_by).username
variables = RequestContext(request,
{ 'forum':forum,
'thread':thread,
'groupid':group_id,
'group_id':group_id,
'eachrep':thread,
'user':request.user,
'reply_count':reply_count,
'forum_created_by':usrname
})
return render_to_response("ndf/thread_details.html",variables)
usrname = User.objects.get(id=thread.created_by).username
variables= RequestContext(request,
{ 'forum':thread,
'thread':None,
'groupid':group_id,
'group_id':group_id,
'eachrep':thread,
'user':request.user,
'reply_count':reply_count,
'forum_created_by':usrname
})
return render_to_response("ndf/thread_details.html",variables)
except Exception as e:
print "Exception in thread_details "+str(e)
pass
@login_required
@get_execution_time
def create_thread(request, group_id, forum_id):
'''
Method to create thread
'''
forum = node_collection.one({'_id': ObjectId(forum_id)})
# forum_data = {
# 'name':forum.name,
# 'content':forum.content,
# 'created_by':User.objects.get(id=forum.created_by).username
# }
# print forum_data
forum_threads = []
exstng_reply = node_collection.find({'$and':[{'_type':'GSystem'},{'prior_node':ObjectId(forum._id)}],'status':{'$nin':['HIDDEN']}})
exstng_reply.sort('created_at')
for each in exstng_reply:
forum_threads.append(each.name)
if request.method == "POST":
colg = node_collection.one({'_id':ObjectId(group_id)})
name = unicode(request.POST.get('thread_name',""))
content_org = request.POST.get('content_org',"")
# -------------------
colrep = node_collection.collection.GSystem()
colrep.member_of.append(twist_st._id)
#### ADDED ON 14th July
colrep.access_policy = u"PUBLIC"
colrep.url = set_all_urls(colrep.member_of)
colrep.prior_node.append(forum._id)
colrep.name = name
if content_org:
colrep.content_org = unicode(content_org)
# Required to link temporary files with the current user who is modifying this document
usrname = request.user.username
filename = slugify(name) + "-" + usrname + "-"
colrep.content = org2html(content_org, file_prefix=filename)
print "content=",colrep.content
usrid=int(request.user.id)
colrep.created_by=usrid
colrep.modified_by = usrid
if usrid not in colrep.contributors:
colrep.contributors.append(usrid)
colrep.group_set.append(colg._id)
colrep.save()
'''Code to send notification to all members of the group except those whose notification preference is turned OFF'''
link="http://"+sitename+"/"+str(colg._id)+"/forum/thread/"+str(colrep._id)
for each in colg.author_set:
bx=User.objects.filter(id=each)
if bx:
bx=User.objects.get(id=each)
else:
continue
activity="Added thread"
msg=request.user.username+" has added a thread in the forum " + forum.name + " in the group -'" + colg.name+"'\n"+"Please visit "+link+" to see the thread."
if bx:
auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
if colg._id and auth:
no_check=forum_notification_status(colg._id,auth._id)
else:
no_check=True
if no_check:
ret = set_notif_val(request,colg._id,msg,activity,bx)
variables = RequestContext(request,
{ 'forum':forum,
'thread':colrep,
'eachrep':colrep,
'groupid':group_id,
'group_id':group_id,
'user':request.user,
'reply_count':0,
'forum_threads': json.dumps(forum_threads),
'forum_created_by':User.objects.get(id=forum.created_by).username
})
return render_to_response("ndf/thread_details.html",variables)
else:
return render_to_response("ndf/create_thread.html",
{ 'group_id':group_id,
'groupid':group_id,
'forum': forum,
'forum_threads': json.dumps(forum_threads),
'forum_created_by':User.objects.get(id=forum.created_by).username
},
RequestContext(request))
@login_required
@get_execution_time
def add_node(request,group_id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
try:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
content_org = request.POST.get("reply","")
node = request.POST.get("node","")
thread = request.POST.get("thread","") # getting thread _id
forumid = request.POST.get("forumid","") # getting forum _id
sup_id = request.POST.get("supnode","") #getting _id of it's parent node
tw_name = request.POST.get("twistname","")
forumobj = ""
groupobj = ""
colg = node_collection.one({'_id':ObjectId(group_id)})
if forumid:
forumobj = node_collection.one({"_id": ObjectId(forumid)})
sup = node_collection.one({"_id": ObjectId(sup_id)})
if not sup :
return HttpResponse("failure")
colrep = node_collection.collection.GSystem()
if node == "Twist":
name = tw_name
colrep.member_of.append(twist_st._id)
elif node == "Reply":
name = unicode("Reply of:"+str(sup._id))
colrep.member_of.append(reply_st._id)
colrep.prior_node.append(sup._id)
colrep.name = name
if content_org:
colrep.content_org = unicode(content_org)
# Required to link temporary files with the current user who is modifying this document
usrname = request.user.username
filename = slugify(name) + "-" + usrname + "-"
colrep.content = org2html(content_org, file_prefix = filename)
usrid = int(request.user.id)
colrep.created_by = usrid
colrep.modified_by = usrid
if usrid not in colrep.contributors:
colrep.contributors.append(usrid)
colrep.group_set.append(colg._id)
colrep.save()
# print "----------", colrep._id
groupname = colg.name
if node == "Twist" :
url="http://"+sitename+"/"+str(group_id)+"/forum/thread/"+str(colrep._id)
activity=request.user.username+" -added a thread '"
prefix="' on the forum '"+forumobj.name+"'"
nodename=name
if node == "Reply":
threadobj=node_collection.one({"_id": ObjectId(thread)})
url="http://"+sitename+"/"+str(group_id)+"/forum/thread/"+str(threadobj._id)
activity=request.user.username+" -added a reply "
prefix=" on the thread '"+threadobj.name+"' on the forum '"+forumobj.name+"'"
nodename=""
link = url
for each in colg.author_set:
if each != colg.created_by:
bx=User.objects.get(id=each)
msg=activity+"-"+nodename+prefix+" in the group '"+ groupname +"'\n"+"Please visit "+link+" to see the updated page"
if bx:
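                    # NOTE: unlike create_forum/edit_forum, this checks the
                    # requesting user's notification preference (auth is the
                    # requester's Author node), not each recipient's.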
no_check=forum_notification_status(group_id,auth._id)
if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
bx=User.objects.get(id=colg.created_by)
msg=activity+"-"+nodename+prefix+" in the group '"+groupname+"' created by you"+"\n"+"Please visit "+link+" to see the updated page"
if bx:
no_check=forum_notification_status(group_id,auth._id)
if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
if node == "Reply":
# if exstng_reply:
# exstng_reply.prior_node =[]
# exstng_reply.prior_node.append(colrep._id)
# exstng_reply.save()
threadobj=node_collection.one({"_id": ObjectId(thread)})
variables=RequestContext(request,{'thread':threadobj,'user':request.user,'forum':forumobj,'groupid':group_id,'group_id':group_id})
return render_to_response("ndf/refreshtwist.html",variables)
else:
templ=get_template('ndf/refreshthread.html')
html = templ.render(Context({'forum':forumobj,'user':request.user,'groupid':group_id,'group_id':group_id}))
return HttpResponse(html)
except Exception as e:
return HttpResponse(""+str(e))
return HttpResponse("success")
@get_execution_time
def get_profile_pic(username):
auth = node_collection.one({'_type': 'Author', 'name': unicode(username) })
prof_pic = node_collection.one({'_type': u'RelationType', 'name': u'has_profile_pic'})
dbref_profile_pic = prof_pic.get_dbref()
collection_tr = db[Triple.collection_name]
prof_pic_rel = collection_tr.Triple.find({'_type': 'GRelation', 'subject': ObjectId(auth._id), 'relation_type': dbref_profile_pic })
# prof_pic_rel will get the cursor object of relation of user with its profile picture
if prof_pic_rel.count() :
index = prof_pic_rel[prof_pic_rel.count() - 1].right_subject
img_obj = node_collection.one({'_type': 'File', '_id': ObjectId(index) })
else:
img_obj = ""
return img_obj
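# A minimal usage sketch (illustrative): get_profile_pic returns the most
# recently related profile-picture File node, or "" when none is set.
#   img = get_profile_pic(request.user.username)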
@login_required
@check_delete
@get_execution_time
def delete_forum(request,group_id,node_id,relns=None):
""" Changing status of forum to HIDDEN
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
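    # soft delete: the forum's status is set to HIDDEN rather than removing
    # the node from the collection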
op = node_collection.collection.update({'_id': ObjectId(node_id)}, {'$set': {'status': u"HIDDEN"}})
node=node_collection.one({'_id':ObjectId(node_id)})
#send notifications to all group members
colg=node_collection.one({'_id':ObjectId(group_id)})
for each in colg.author_set:
if each != colg.created_by:
bx=get_userobject(each)
if bx:
activity=request.user.username+" -deleted forum "
msg=activity+"-"+node.name+"- in the group '"+ colg.name
# no_check=forum_notification_status(group_id,auth._id)
# if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
activity=request.user.username+" -deleted forum "
bx=get_userobject(colg.created_by)
if bx:
msg=activity+"-"+node.name+"- in the group '"+colg.name+"' created by you"
# no_check=forum_notification_status(group_id,auth._id)
# if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
return HttpResponseRedirect(reverse('forum', kwargs={'group_id': group_id}))
@login_required
@get_execution_time
def delete_thread(request,group_id,forum_id,node_id):
""" Changing status of thread to HIDDEN
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(node_id) :
thread=node_collection.one({'_id':ObjectId(node_id)})
else:
return
forum = node_collection.one({'_id': ObjectId(forum_id)})
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
op = node_collection.collection.update({'_id': ObjectId(node_id)}, {'$set': {'status': u"HIDDEN"}})
node=node_collection.one({'_id':ObjectId(node_id)})
forum_threads = []
exstng_reply = node_collection.find({'$and':[{'_type':'GSystem'},{'prior_node':ObjectId(forum._id)}],'status':{'$nin':['HIDDEN']}})
exstng_reply.sort('created_at')
forum_node=node_collection.one({'_id':ObjectId(forum_id)})
for each in exstng_reply:
forum_threads.append(each.name)
#send notifications to all group members
colg=node_collection.one({'_id':ObjectId(group_id)})
for each in colg.author_set:
if each != colg.created_by:
bx=get_userobject(each)
if bx:
activity=request.user.username+" -deleted thread "
prefix=" in the forum "+forum_node.name
link="http://"+sitename+"/"+str(colg._id)+"/forum/"+str(forum_node._id)
msg=activity+"-"+node.name+prefix+"- in the group '"+colg.name+"' created by you."+"'\n"+"Please visit "+link+" to see the forum."
# no_check=forum_notification_status(group_id,auth._id)
# if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
activity=request.user.username+" -deleted thread "
prefix=" in the forum "+forum_node.name
bx=get_userobject(colg.created_by)
if bx:
link="http://"+sitename+"/"+str(colg._id)+"/forum/"+str(forum_node._id)
msg=activity+"-"+node.name+prefix+"- in the group '"+colg.name+"' created by you."+"'\n"+"Please visit "+link+" to see the forum."
# no_check=forum_notification_status(group_id,auth._id)
# if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
#send notification code ends here
variables = RequestContext(request,{
'forum':forum,
'groupid':group_id,'group_id':group_id,
'forum_created_by':User.objects.get(id=forum.created_by).username
})
return render_to_response("ndf/forumdetails.html",variables)
@login_required
@get_execution_time
def edit_thread(request,group_id,forum_id,thread_id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
forum=node_collection.one({'_id':ObjectId(forum_id)})
thread=node_collection.one({'_id':ObjectId(thread_id)})
exstng_reply = node_collection.find({'$and':[{'_type':'GSystem'},{'prior_node':ObjectId(forum._id)}]})
nodes=[]
exstng_reply.sort('created_at')
for each in exstng_reply:
nodes.append(each.name)
request.session['nodes']=json.dumps(nodes)
colg=node_collection.one({'_id':ObjectId(group_id)})
if request.method == 'POST':
name = unicode(request.POST.get('thread_name',"")) # thread name
thread.name = name
content_org = request.POST.get('content_org',"") # thread content
print "content=",content_org
if content_org:
thread.content_org = unicode(content_org)
usrname = request.user.username
filename = slugify(name) + "-" + usrname + "-"
thread.content = org2html(content_org, file_prefix=filename)
thread.save()
link="http://"+sitename+"/"+str(colg._id)+"/forum/thread/"+str(thread._id)
for each in colg.author_set:
if each != colg.created_by:
bx=get_userobject(each)
if bx:
msg=request.user.username+" has edited thread- "+thread.name+"- in the forum " + forum.name + " in the group -'" + colg.name+"'\n"+"Please visit "+link+" to see the thread."
activity="Edited thread"
#auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
#if colg._id and auth:
#no_check=forum_notification_status(colg._id,auth._id)
# else:
# no_check=True
# if no_check:
ret = set_notif_val(request,colg._id,msg,activity,bx)
activity=request.user.username+" edited thread -"
bx=get_userobject(colg.created_by)
prefix="-in the forum -"+forum.name
if bx:
msg=activity+"-"+thread.name+prefix+" in the group '"+colg.name+"' created by you"+"\n"+"Please visit "+link+" to see the thread"
# no_check=forum_notification_status(group_id,auth._id)
# if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
variables = RequestContext(request,{'group_id':group_id,'thread_id': thread._id,'nodes':json.dumps(nodes)})
return HttpResponseRedirect(reverse('thread', kwargs={'group_id':group_id,'thread_id': thread._id }))
else:
return render_to_response("ndf/edit_thread.html",
{ 'group_id':group_id,
'groupid':group_id,
'forum': forum,
'thread':thread,
'forum_created_by':User.objects.get(id=forum.created_by).username
},
RequestContext(request))
@login_required
@get_execution_time
def delete_reply(request,group_id,forum_id,thread_id,node_id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False :
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
op = node_collection.collection.update({'_id': ObjectId(node_id)}, {'$set': {'status': u"HIDDEN"}})
replyobj=node_collection.one({'_id':ObjectId(node_id)})
forumobj=node_collection.one({"_id": ObjectId(forum_id)})
threadobj=node_collection.one({"_id": ObjectId(thread_id)})
# notifications to all group members
colg=node_collection.one({'_id':ObjectId(group_id)})
link="http://"+sitename+"/"+str(colg._id)+"/forum/thread/"+str(threadobj._id)
for each in colg.author_set:
if each != colg.created_by:
bx=get_userobject(each)
if bx:
msg=request.user.username+" has deleted reply- "+replyobj.content_org+"- in the thread " + threadobj.name + " in the group -'" + colg.name+"'\n"+"Please visit "+link+" to see the thread."
activity="Deleted reply"
#auth = node_collection.one({'_type': 'Author', 'name': unicode(bx.username) })
#if colg._id and auth:
#no_check=forum_notification_status(colg._id,auth._id)
# else:
# no_check=True
# if no_check:
ret = set_notif_val(request,colg._id,msg,activity,bx)
prefix="-in the forum -"+forumobj.name
msg=request.user.username+" has deleted reply- "+replyobj.content_org+"- in the thread " + threadobj.name +prefix+ " in the group -'" + colg.name+"' created by you"+"\n Please visit "+link+" to see the thread."
bx=get_userobject(colg.created_by)
if bx:
# no_check=forum_notification_status(group_id,auth._id)
# if no_check:
ret = set_notif_val(request,group_id,msg,activity,bx)
variables=RequestContext(request,{'thread':threadobj,'user':request.user,'forum':forumobj,'groupid':group_id,'group_id':group_id})
return HttpResponseRedirect(reverse('thread', kwargs={'group_id':group_id,'thread_id': threadobj._id }))
# return render_to_response("ndf/replytwistrep.html",variables)
| sunnychaudhari/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/forum.py | Python | agpl-3.0 | 40,863 | ["VisIt"] | 41f1c66c32e57943bb1f2b183dff2d77d69c117ee6de78289c18bfb04f259f93 |
# Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import MonteCarloLikelihood
from .scalar_continuous import Gaussian
class GaussianMC(MonteCarloLikelihood, Gaussian):
"""
Stochastic version of Gaussian likelihood for demonstration purposes only.
"""
pass
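# A minimal usage sketch (illustrative, not part of this module): GaussianMC can
# stand in for the analytic Gaussian likelihood in a GPflow model, replacing
# closed-form variational expectations with Monte Carlo estimates. The
# `num_monte_carlo_points` attribute comes from MonteCarloLikelihood.
#
#   import numpy as np
#   import gpflow
#   X = np.random.rand(50, 1)
#   Y = np.sin(6 * X) + 0.1 * np.random.randn(50, 1)
#   lik = GaussianMC(variance=0.1)
#   lik.num_monte_carlo_points = 500  # samples for the MC expectations
#   model = gpflow.models.VGP((X, Y), gpflow.kernels.SquaredExponential(), lik)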
| GPflow/GPflow | gpflow/likelihoods/misc.py | Python | apache-2.0 | 847 | ["Gaussian"] | e841af835668fca17f40c7fed85259e6ba51cb31a18ffff8297dafbffea22bad |
"""
Choropleth of parking tickets issued to state by precinct in NYC
================================================================
This example plots a subset of parking tickets issued to drivers in New York City.
Specifically, it plots the subset of tickets issued in the city that are more common for
drivers from a given state than the citywide average. This difference between "expected
tickets issued" and "actual tickets issued" is interesting because it shows which areas
visitors driving into the city from a specific state are more likely to visit than their
peers from other states.
Observations that can be made based on this plot include:
* Only New Yorkers visit Staten Island.
* Drivers from New Jersey, many of whom likely work in New York City, bias towards Manhattan.
* Drivers from Pennsylvania and Connecticut bias towards the borough closest to their state:
The Bronx for Connecticut, Brooklyn for Pennsylvania.
This example was inspired by the blog post `"Californians love Brooklyn, New Jerseyans love
Midtown: Mapping NYC’s Visitors Through Parking Tickets"
<https://iquantny.tumblr.com/post/84393789169/californians-love-brooklyn-new-jerseyans-love>`_.
"""
import geopandas as gpd
import geoplot as gplt
import geoplot.crs as gcrs
import matplotlib.pyplot as plt
# load the data
nyc_boroughs = gpd.read_file(gplt.datasets.get_path('nyc_boroughs'))
tickets = gpd.read_file(gplt.datasets.get_path('nyc_parking_tickets'))
proj = gcrs.AlbersEqualArea(central_latitude=40.7128, central_longitude=-74.0059)
def plot_state_to_ax(state, ax):
gplt.choropleth(
tickets.set_index('id').loc[:, [state, 'geometry']],
hue=state, cmap='Blues',
linewidth=0.0, ax=ax
)
gplt.polyplot(
nyc_boroughs, edgecolor='black', linewidth=0.5, ax=ax
)
f, axarr = plt.subplots(2, 2, figsize=(12, 12), subplot_kw={'projection': proj})
plt.suptitle('Parking Tickets Issued to State by Precinct, 2016', fontsize=16)
plt.subplots_adjust(top=0.95)
plot_state_to_ax('ny', axarr[0][0])
axarr[0][0].set_title('New York (n=6,679,268)')
plot_state_to_ax('nj', axarr[0][1])
axarr[0][1].set_title('New Jersey (n=854,647)')
plot_state_to_ax('pa', axarr[1][0])
axarr[1][0].set_title('Pennsylvania (n=215,065)')
plot_state_to_ax('ct', axarr[1][1])
axarr[1][1].set_title('Connecticut (n=126,661)')
plt.savefig("nyc-parking-tickets.png", bbox_inches='tight')
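# A minimal single-panel variant (illustrative; reuses the datasets and
# projection defined above):
#
#   ax = gplt.choropleth(
#       tickets.set_index('id').loc[:, ['ny', 'geometry']],
#       hue='ny', cmap='Blues', linewidth=0.0, projection=proj
#   )
#   gplt.polyplot(nyc_boroughs, edgecolor='black', linewidth=0.5, ax=ax)
#   plt.savefig("nyc-parking-tickets-ny.png", bbox_inches='tight')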
| ResidentMario/geoplot | examples/plot_nyc_parking_tickets.py | Python | mit | 2,395 | ["VisIt"] | 74f1ca78603b5c12246aac65f58728b9424757199d646c4d4b520d5ca95200b2 |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
import setuptools
from distutils.command.clean import clean as _clean
from distutils.command.build import build as _build
from setuptools.command.sdist import sdist as _sdist
from setuptools.command.build_ext import build_ext as _build_ext
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
def strip_comments(l):
return l.split('#', 1)[0].strip()
def reqs(filename):
with open(os.path.join(os.getcwd(),
'requirements',
filename)) as fp:
return filter(None, [strip_comments(l)
for l in fp.readlines()])
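# Illustrative behaviour of the two helpers above: with requirements/install.txt
# containing
#   Flask==0.10  # web framework
#   blinker
# reqs('install.txt') strips the inline comment and blank entries and
# returns ['Flask==0.10', 'blinker'].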
setup_ext = {}
if os.path.isfile('gulpfile.js'):
    # if gulpfile.js exists, build and minify the front-end assets
def gulp_build(done=[]):
if not done:
if os.system('npm install '
'--disturl=http://dist.u.qiniudn.com '
'--registry=http://r.cnpmjs.org'):
sys.exit(1)
if os.system('bower install'):
sys.exit(1)
if os.system('gulp build'):
sys.exit(1)
done.append(1)
def gulp_clean(done=[]):
if not done:
if os.system('npm install '
'--disturl=http://dist.u.qiniudn.com '
'--registry=http://r.cnpmjs.org'):
sys.exit(1)
if os.system('gulp clean'):
sys.exit(1)
done.append(1)
class build(_build):
sub_commands = _build.sub_commands[:]
# force to build ext
for ix, (name, checkfunc) in enumerate(sub_commands):
if name == 'build_ext':
sub_commands[ix] = (name, lambda self: True)
class build_ext(_build_ext):
def run(self):
gulp_build()
_build_ext.run(self)
class sdist(_sdist):
def run(self):
gulp_build()
_sdist.run(self)
class clean(_clean):
def run(self):
_clean.run(self)
gulp_clean()
setup_ext = {'cmdclass': {'sdist': sdist,
'clean': clean,
'build': build,
'build_ext': build_ext}}
setup_params = dict(
name="qsapp-official",
url="http://wiki.yimiqisan.com/",
version='1.0',
author="qisan",
author_email="qisanstudio@gmail.com",
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=reqs('install.txt'))
setup_params.update(setup_ext)
if __name__ == '__main__':
setuptools.setup(**setup_params)
| qisanstudio/qsapp-official | setup.py | Python | mit | 2,778 | ["GULP"] | d16da3b26998bba3d2443ae0d19a029fec0ac5c262493143ad3f2da6a4022b2a |
# Copyright 2009-2013 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
from Bio._py3k import _universal_read_mode
import os
import sys
import subprocess
import unittest
from Bio.Application import _escape_filename
from Bio import MissingExternalDependencyError
from Bio.Align.Applications import MuscleCommandline
from Bio import SeqIO
from Bio import AlignIO
#################################################################
# Try to avoid problems when the OS is in another language
os.environ['LANG'] = 'C'
muscle_exe = None
if sys.platform=="win32":
try:
# This can vary depending on the Windows language.
prog_files = os.environ["PROGRAMFILES"]
except KeyError:
prog_files = r"C:\Program Files"
    # For Windows, MUSCLE just comes as a zip file which contains a
    # Muscle directory with the muscle.exe file plus a readme etc,
# which the user could put anywhere. We'll try a few sensible
# locations under Program Files... and then the full path.
likely_dirs = ["", # Current dir
prog_files,
os.path.join(prog_files, "Muscle3.6"),
os.path.join(prog_files, "Muscle3.7"),
os.path.join(prog_files, "Muscle3.8"),
os.path.join(prog_files, "Muscle3.9"),
os.path.join(prog_files, "Muscle")] + sys.path
for folder in likely_dirs:
if os.path.isdir(folder):
if os.path.isfile(os.path.join(folder, "muscle.exe")):
muscle_exe = os.path.join(folder, "muscle.exe")
break
if muscle_exe:
break
else:
from Bio._py3k import getoutput
output = getoutput("muscle -version")
# Since "not found" may be in another language, try and be sure this is
# really the MUSCLE tool's output
if "not found" not in output and "MUSCLE" in output \
and "Edgar" in output:
muscle_exe = "muscle"
if not muscle_exe:
raise MissingExternalDependencyError(
"Install MUSCLE if you want to use the Bio.Align.Applications wrapper.")
#################################################################
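# A minimal standalone sketch (illustrative, assumes MUSCLE is on the PATH and
# an input FASTA file exists):
#
#   from Bio.Align.Applications import MuscleCommandline
#   cline = MuscleCommandline("muscle", input="example.fasta", clw=True)
#   stdout, stderr = cline()  # runs MUSCLE; Clustal-format alignment on stdout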
class MuscleApplication(unittest.TestCase):
def setUp(self):
self.infile1 = "Fasta/f002"
self.infile2 = "Fasta/fa01"
self.infile3 = "Fasta/f001"
self.outfile1 = "Fasta/temp align out1.fa" # with spaces!
self.outfile2 = "Fasta/temp_align_out2.fa"
self.outfile3 = "Fasta/temp_align_out3.fa"
self.outfile4 = "Fasta/temp_align_out4.fa"
def tearDown(self):
if os.path.isfile(self.outfile1):
os.remove(self.outfile1)
if os.path.isfile(self.outfile2):
os.remove(self.outfile2)
if os.path.isfile(self.outfile3):
os.remove(self.outfile3)
if os.path.isfile(self.outfile4):
os.remove(self.outfile4)
def test_Muscle_simple(self):
"""Simple round-trip through app just infile and outfile"""
cmdline = MuscleCommandline(muscle_exe,
input=self.infile1,
out=self.outfile1)
self.assertEqual(str(cmdline), _escape_filename(muscle_exe)
+ ' -in Fasta/f002 -out "Fasta/temp align out1.fa"')
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
output, error = cmdline()
self.assertEqual(output, "")
self.assertTrue("ERROR" not in error)
def test_Muscle_with_options(self):
"""Round-trip through app with a switch and valued option"""
cmdline = MuscleCommandline(muscle_exe)
cmdline.set_parameter("input", self.infile1) # "input" is alias for "in"
cmdline.set_parameter("out", self.outfile2)
# Use property:
cmdline.objscore = "sp"
cmdline.noanchors = True
self.assertEqual(str(cmdline), _escape_filename(muscle_exe) +
" -in Fasta/f002" +
" -out Fasta/temp_align_out2.fa" +
" -objscore sp -noanchors")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
output, error = cmdline()
self.assertEqual(output, "")
self.assertTrue("ERROR" not in error)
self.assertTrue(error.strip().startswith("MUSCLE"), output)
def test_Muscle_profile_simple(self):
"""Simple round-trip through app doing a profile alignment"""
cmdline = MuscleCommandline(muscle_exe)
cmdline.set_parameter("out", self.outfile3)
cmdline.set_parameter("profile", True)
cmdline.set_parameter("in1", self.infile2)
cmdline.set_parameter("in2", self.infile3)
self.assertEqual(str(cmdline), _escape_filename(muscle_exe) +
" -out Fasta/temp_align_out3.fa" +
" -profile -in1 Fasta/fa01 -in2 Fasta/f001")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
output, error = cmdline()
self.assertEqual(output, "")
self.assertTrue("ERROR" not in error)
self.assertTrue(error.strip().startswith("MUSCLE"), output)
def test_Muscle_profile_with_options(self):
"""Profile alignment, and switch and valued options"""
# Using some keyword arguments, note -stable isn't supported in v3.8
cmdline = MuscleCommandline(muscle_exe, out=self.outfile4,
in1=self.infile2, in2=self.infile3,
profile=True, stable=True,
cluster1="neighborjoining")
self.assertEqual(str(cmdline), _escape_filename(muscle_exe) +
" -out Fasta/temp_align_out4.fa" +
" -profile -in1 Fasta/fa01 -in2 Fasta/f001" +
" -cluster1 neighborjoining -stable")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
"""
#TODO - Why doesn't this work with MUSCLE 3.6 on the Mac?
#It may be another bug fixed in MUSCLE 3.7 ...
result, stdout, stderr = generic_run(cmdline)
#NOTE: generic_run has been removed from Biopython
self.assertEqual(result.return_code, 0)
self.assertEqual(stdout.read(), "")
self.assertTrue("ERROR" not in stderr.read())
self.assertEqual(str(result._cl), str(cmdline))
"""
class SimpleAlignTest(unittest.TestCase):
"""Simple MUSCLE tests"""
"""
#FASTA output seems broken on Muscle 3.6 (on the Mac).
def test_simple_fasta(self):
input_file = "Fasta/f002"
self.assertTrue(os.path.isfile(input_file))
records = list(SeqIO.parse(input_file,"fasta"))
#Prepare the command...
cmdline = MuscleCommandline(muscle_exe)
cmdline.set_parameter("in", input_file)
#Preserve input record order (makes checking output easier)
cmdline.set_parameter("stable")
#Set some others options just to test them
cmdline.set_parameter("maxiters", 2)
self.assertEqual(str(cmdline).rstrip(), "muscle -in Fasta/f002 -maxiters 2 -stable")
result, out_handle, err_handle = generic_run(cmdline)
#NOTE: generic_run has been removed from Biopython
print(err_handle.read())
print(out_handle.read())
align = AlignIO.read(out_handle, "fasta")
self.assertEqual(len(records),len(align))
for old, new in zip(records, align):
self.assertEqual(old.id, new.id)
self.assertEqual(str(new.seq).replace("-",""), str(old.seq))
"""
def test_simple_clustal(self):
"""Simple muscle call using Clustal output with a MUSCLE header"""
input_file = "Fasta/f002"
self.assertTrue(os.path.isfile(input_file))
records = list(SeqIO.parse(input_file, "fasta"))
records.sort(key=lambda rec: rec.id)
# Prepare the command... use Clustal output (with a MUSCLE header)
cmdline = MuscleCommandline(muscle_exe, input=input_file, clw=True)
self.assertEqual(str(cmdline).rstrip(), _escape_filename(muscle_exe) +
" -in Fasta/f002 -clw")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
child = subprocess.Popen(str(cmdline),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32"))
# Didn't use -quiet so there should be progress reports on stderr,
align = AlignIO.read(child.stdout, "clustal")
align.sort() # by record.id
self.assertTrue(child.stderr.read().strip().startswith("MUSCLE"))
return_code = child.wait()
self.assertEqual(return_code, 0)
child.stdout.close()
child.stderr.close()
del child
self.assertEqual(len(records), len(align))
for old, new in zip(records, align):
self.assertEqual(old.id, new.id)
self.assertEqual(str(new.seq).replace("-", ""), str(old.seq))
def test_simple_clustal_strict(self):
"""Simple muscle call using strict Clustal output"""
input_file = "Fasta/f002"
self.assertTrue(os.path.isfile(input_file))
records = list(SeqIO.parse(input_file, "fasta"))
records.sort(key=lambda rec: rec.id)
# Prepare the command...
cmdline = MuscleCommandline(muscle_exe)
cmdline.set_parameter("in", input_file)
# Use clustal output (with a CLUSTAL header)
cmdline.set_parameter("clwstrict", True) # Default None treated as False!
self.assertEqual(str(cmdline).rstrip(), _escape_filename(muscle_exe) +
" -in Fasta/f002 -clwstrict")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
child = subprocess.Popen(str(cmdline),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32"))
# Didn't use -quiet so there should be progress reports on stderr,
align = AlignIO.read(child.stdout, "clustal")
align.sort()
self.assertTrue(child.stderr.read().strip().startswith("MUSCLE"))
self.assertEqual(len(records), len(align))
for old, new in zip(records, align):
self.assertEqual(old.id, new.id)
self.assertEqual(str(new.seq).replace("-", ""), str(old.seq))
return_code = child.wait()
self.assertEqual(return_code, 0)
child.stdout.close()
child.stderr.close()
del child
def test_long(self):
"""Simple muscle call using long file"""
# Create a large input file by converting some of another example file
temp_large_fasta_file = "temp_cw_prot.fasta"
records = list(SeqIO.parse("NBRF/Cw_prot.pir", "pir"))[:40]
SeqIO.write(records, temp_large_fasta_file, "fasta")
# Prepare the command...
cmdline = MuscleCommandline(muscle_exe)
cmdline.set_parameter("in", temp_large_fasta_file)
# Use fast options
cmdline.set_parameter("maxiters", 1)
cmdline.set_parameter("diags", True) # Default None treated as False!
# Use clustal output
cmdline.set_parameter("clwstrict", True) # Default None treated as False!
        # Shouldn't need this, but just to make sure it is accepted
cmdline.set_parameter("maxhours", 0.1)
# No progress reports to stderr
cmdline.set_parameter("quiet", True) # Default None treated as False!
self.assertEqual(str(cmdline).rstrip(), _escape_filename(muscle_exe) +
" -in temp_cw_prot.fasta -diags -maxhours 0.1" +
" -maxiters 1 -clwstrict -quiet")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
child = subprocess.Popen(str(cmdline),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32"))
align = AlignIO.read(child.stdout, "clustal")
align.sort()
records.sort(key=lambda rec: rec.id)
self.assertEqual(len(records), len(align))
for old, new in zip(records, align):
self.assertEqual(old.id, new.id)
self.assertEqual(str(new.seq).replace("-", ""), str(old.seq))
# See if quiet worked:
self.assertEqual("", child.stderr.read().strip())
return_code = child.wait()
self.assertEqual(return_code, 0)
child.stdout.close()
child.stderr.close()
del child
os.remove(temp_large_fasta_file)
def test_using_stdin(self):
"""Simple alignment using stdin"""
input_file = "Fasta/f002"
self.assertTrue(os.path.isfile(input_file))
records = list(SeqIO.parse(input_file, "fasta"))
# Prepare the command... use Clustal output (with a MUSCLE header)
cline = MuscleCommandline(muscle_exe, clw=True)
self.assertEqual(str(cline).rstrip(),
_escape_filename(muscle_exe) + " -clw")
self.assertEqual(str(eval(repr(cline))), str(cline))
child = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32"))
SeqIO.write(records, child.stdin, "fasta")
child.stdin.close()
# Alignment will now run...
align = AlignIO.read(child.stdout, "clustal")
align.sort()
records.sort(key=lambda rec: rec.id)
self.assertEqual(len(records), len(align))
for old, new in zip(records, align):
self.assertEqual(old.id, new.id)
self.assertEqual(str(new.seq).replace("-", ""), str(old.seq))
self.assertEqual(0, child.wait())
child.stdout.close()
child.stderr.close()
del child
def test_with_multiple_output_formats(self):
"""Simple muscle call with multiple output formats"""
input_file = "Fasta/f002"
output_html = "temp_f002.html"
output_clwstrict = "temp_f002.clw"
self.assertTrue(os.path.isfile(input_file))
records = list(SeqIO.parse(input_file, "fasta"))
records.sort(key=lambda rec: rec.id)
# Prepare the command... use Clustal output (with a MUSCLE header)
cmdline = MuscleCommandline(muscle_exe, input=input_file,
clw=True, htmlout=output_html,
clwstrictout=output_clwstrict)
self.assertEqual(str(cmdline).rstrip(), _escape_filename(muscle_exe) +
" -in Fasta/f002 -clw -htmlout temp_f002.html" +
" -clwstrictout temp_f002.clw")
self.assertEqual(str(eval(repr(cmdline))), str(cmdline))
child = subprocess.Popen(str(cmdline),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32"))
# Clustalw on stdout:
align = AlignIO.read(child.stdout, "clustal")
align.sort()
# Didn't use -quiet so there should be progress reports on stderr,
self.assertTrue(child.stderr.read().strip().startswith("MUSCLE"))
return_code = child.wait()
self.assertEqual(return_code, 0)
self.assertEqual(len(records), len(align))
for old, new in zip(records, align):
self.assertEqual(old.id, new.id)
child.stdout.close()
child.stderr.close()
del child
handle = open(output_html, _universal_read_mode)
html = handle.read().strip().upper()
handle.close()
self.assertTrue(html.startswith("<HTML"))
self.assertTrue(html.endswith("</HTML>"))
# ClustalW strict:
align = AlignIO.read(output_clwstrict, "clustal")
align.sort()
self.assertEqual(len(records), len(align))
for old, new in zip(records, align):
self.assertEqual(old.id, new.id)
os.remove(output_html)
os.remove(output_clwstrict)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_Muscle_tool.py | Python | gpl-2.0 | 16,947 | ["Biopython"] | 33f0dab8693902d27f170302fdad3dab14f71e12ee24b8186cd49dd48be3119b |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import numpy
from pyscf import lib
from pyscf import gto, dft
from pyscf import tdscf
from pyscf.grad import tdrks as tdrks_grad
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['H' , (0. , 0. , 1.804)],
['F' , (0. , 0. , 0.)], ]
mol.unit = 'B'
mol.basis = '631g'
mol.build()
mf_lda = dft.RKS(mol).set(xc='LDA,')
mf_lda.grids.prune = False
mf_lda.kernel()
mf_gga = dft.RKS(mol).set(xc='b88,')
mf_gga.grids.prune = False
mf_gga.kernel()
def tearDownModule():
global mol, mf_lda
del mol, mf_lda
class KnownValues(unittest.TestCase):
def test_tda_singlet_lda(self):
td = tdscf.TDA(mf_lda).run(nstates=3)
tdg = td.nuc_grad_method()
g1 = tdg.kernel(td.xy[2])
self.assertAlmostEqual(g1[0,2], -9.23916667e-02, 8)
# def test_tda_triplet_lda(self):
# td = tdscf.TDA(mf_lda).run(singlet=False, nstates=3)
# tdg = td.nuc_grad_method()
# g1 = tdg.kernel(state=3)
# self.assertAlmostEqual(g1[0,2], -9.23916667e-02, 8)
def test_tda_singlet_b88(self):
td = tdscf.TDA(mf_gga).run(nstates=3)
tdg = td.nuc_grad_method()
g1 = tdg.kernel(state=3)
self.assertAlmostEqual(g1[0,2], -9.32506535e-02, 8)
# def test_tda_triplet_b88(self):
# td = tdscf.TDA(mf_gga).run(singlet=False, nstates=3)
# tdg = td.nuc_grad_method()
# g1 = tdg.kernel(state=3)
# self.assertAlmostEqual(g1[0,2], -9.32506535e-02, 8)
def test_tddft_lda(self):
td = tdscf.TDDFT(mf_lda).run(nstates=3)
tdg = td.nuc_grad_method()
g1 = tdg.kernel(state=3)
self.assertAlmostEqual(g1[0,2], -1.31315477e-01, 8)
def test_tddft_b3lyp_high_cost(self):
mf = dft.RKS(mol)
mf.xc = 'b3lyp'
mf._numint.libxc = dft.xcfun
mf.grids.prune = False
mf.scf()
td = tdscf.TDDFT(mf).run(nstates=3)
tdg = td.nuc_grad_method()
g1 = tdg.kernel(state=3)
self.assertAlmostEqual(g1[0,2], -1.55778110e-01, 7)
def test_range_separated_high_cost(self):
mol = gto.M(atom="H; H 1 1.", basis='631g', verbose=0)
mf = dft.RKS(mol).set(xc='CAMB3LYP')
mf._numint.libxc = dft.xcfun
td = mf.apply(tdscf.TDA)
tdg_scanner = td.nuc_grad_method().as_scanner().as_scanner()
g = tdg_scanner(mol, state=3)[1]
self.assertAlmostEqual(lib.finger(g), 0.60109310253094916, 7)
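        # Finite-difference check of the analytic gradient: displace the bond
        # by +/-0.001 Angstrom, take (E+ - E-)/0.002 per Angstrom and convert
        # to per-Bohr via lib.param.BOHR before comparing with g[1,0].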
smf = td.as_scanner()
e1 = smf(mol.set_geom_("H; H 1 1.001"))[2]
e2 = smf(mol.set_geom_("H; H 1 0.999"))[2]
self.assertAlmostEqual((e1-e2)/0.002*lib.param.BOHR, g[1,0], 4)
if __name__ == "__main__":
print("Full Tests for TD-RKS gradients")
unittest.main()
| gkc1000/pyscf | pyscf/grad/test/test_tdrks_grad.py | Python | apache-2.0 | 3,409 | ["PySCF"] | 945178c8c7497f4880b7ce37200547c855a731d6d4c29df9bdad82a903a0ff51 |
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import os
import sys
import numpy as np
import pylab as pl
import glob
from cdo import Cdo
from pycmbs.data import Data
from geoval.region import Region, RegionParser
from geoval.polygon import Raster
from geoval.polygon import Polygon as pycmbsPolygon
from pycmbs.benchmarking.utils import get_data_pool_directory
from pycmbs.benchmarking.utils import get_generic_landseamask, get_T63_landseamask
class ConfigFile(object):
"""
class to read pyCMBS configuration file
"""
def __init__(self, file):
"""
file : str
name of parameter file to parse
"""
self.file = file
if not os.path.exists(self.file):
            raise ValueError('Configuration file not existing: %s' % self.file)
else:
self.f = open(file, 'r')
self.read()
def __read_header(self):
"""
read commented lines until next valid line
"""
x = '####'
while x[0] == '#':
a = self.f.readline().replace('\n', '')
x = a.lstrip()
if len(x) == 0: # only whitespaces
x = '#'
return x
def __check_bool(self, x):
s = x.split(',')
if int(s[1]) == 1:
return True
else:
return False
def __check_var(self, x):
s = x.split(',')
if int(s[1]) == 1:
return s[0], s[2] # name,interval
else:
return None, None
def __read_options(self):
# read header of variable plot
self.options = {}
l = self.__read_header()
l = l.lstrip()
if 'BASEMAP' in l.upper():
self.options.update({'basemap': self.__check_bool(l)})
l = self.f.readline().replace('\n', '')
if 'REPORT=' in l.upper():
s = l[7:]
self.options.update({'report': s.replace(' ', '')})
else:
raise ValueError('Report missing in configuration file!')
l = self.f.readline().replace('\n', '')
if 'REPORT_FORMAT=' in l.upper():
s = l[14:].strip()
s = s.lower()
if s not in ['png', 'pdf']:
                raise ValueError('Invalid option for report format [png,pdf]: %s' % s)
else:
self.options.update({'report_format': s})
else:
raise ValueError('report format missing in configuration file!')
l = self.f.readline().replace('\n', '')
if 'AUTHOR=' in l.upper():
s = l[7:]
self.options.update({'author': s})
else:
raise ValueError('author missing in configuration file!')
l = self.f.readline().replace('\n', '')
if 'TEMP_DIR=' in l.upper():
s = l[9:]
if s[-1] != os.sep:
s = s + os.sep
self.options.update({'tempdir': s.replace(' ', '')})
else:
raise ValueError('Temporary directory not specified!')
l = self.f.readline().replace('\n', '')
if 'CLEAN_TEMPDIR' in l.upper():
self.options.update({'cleandir': self.__check_bool(l)})
else:
raise ValueError('Invalid option for clean_tempdir!')
l = self.f.readline().replace('\n', '')
if 'SUMMARY_ONLY' in l.upper():
self.options.update({'summary': self.__check_bool(l)})
else:
raise ValueError('Invalid option for SUMMARY_ONLY!')
l = self.f.readline().replace('\n', '')
if 'CONFIG_DIR=' in l.upper():
s = l[11:]
if s[-1] != os.sep:
s = s + os.sep
if not os.path.exists(s):
raise ValueError('Configuration path is invalid: %s' % s)
self.options.update({'configdir': s.replace(' ', '')})
else:
raise ValueError('CONFIG directory not specified!')
l = self.f.readline().replace('\n', '')
if 'OUTPUT_DIRECTORY=' in l.upper():
s = l[17:]
if s[-1] != os.sep:
s = s + os.sep # this is the root output directory
if not os.path.exists(s):
os.makedirs(s)
self.options.update({'outputdir': s.replace(' ', '')})
else:
raise ValueError('OUTPUT directory not specified!')
# create / remove directories
if not os.path.exists(self.options['tempdir']):
print 'Creating temporary output directory: ', self.options['tempdir']
os.makedirs(self.options['tempdir'])
else:
if self.options['cleandir']:
print 'Cleaning output directory: ', self.options['tempdir']
import glob
files = glob.glob(self.options['tempdir'] + '*.nc')
for f in files:
os.remove(f)
else:
sys.stdout.write(' Temporary output directory already existing: ' + self.options['tempdir'] + '\n')
# update global variable for CDO temporary directory (needed for CDO processing)
os.environ.update({'CDOTEMPDIR': self.options['tempdir']})
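        # Illustrative header block parsed above (values are hypothetical;
        # boolean options use the "name,0|1" form handled by __check_bool):
        #   BASEMAP,0
        #   REPORT=report01
        #   REPORT_FORMAT=pdf
        #   AUTHOR=A. Nonymous
        #   TEMP_DIR=/tmp/pycmbs/
        #   CLEAN_TEMPDIR,0
        #   SUMMARY_ONLY,0
        #   CONFIG_DIR=./configuration/
        #   OUTPUT_DIRECTORY=./output/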
def __read_var_block(self):
""" read header of variable plot """
vars = []
vars_interval = {}
l = self.__read_header()
r, interval = self.__check_var(l)
if r is not None:
vars.append(r)
vars_interval.update({r: interval})
while l[0] != '#':
l = self.f.readline().replace('\n', '')
l = l.lstrip()
if len(l) > 0:
if l[0] == '#':
pass
else:
r, interval = self.__check_var(l)
if r is not None:
vars.append(r)
vars_interval.update({r: interval})
else:
l = ' '
return vars, vars_interval
def __get_model_details(self, s):
return s.split(',')
def __read_date_block(self):
date1 = self.__read_header()
date2 = self.f.readline().replace('\n', '')
tmp = self.f.readline().replace('\n', '') # same time for observations
same_for_obs = self.__check_bool(tmp)
return date1, date2, same_for_obs
def __read_model_block(self):
models = []
types = []
experiments = []
ddirs = []
l = self.__read_header()
model, ty, experiment, ddir = self.__get_model_details(l)
ddir = ddir.rstrip()
if ddir[-1] != os.sep:
ddir = ddir + os.sep
models.append(model)
types.append(ty)
experiments.append(experiment)
ddirs.append(ddir.replace('\n', ''))
has_eof = False
while not has_eof:
try:
l = self.f.next()
l = l.lstrip()
if (len(l) > 0) & (l[0] != '#'):
model, ty, experiment, ddir = self.__get_model_details(l)
ddir = ddir.rstrip()
if ddir[-1] != os.sep:
ddir = ddir + os.sep
models.append(model)
types.append(ty)
experiments.append(experiment)
ddirs.append(ddir.replace('\n', ''))
except:
has_eof = True
return models, experiments, types, ddirs
def read(self):
"""
read configuration files in 3 blocks
"""
sys.stdout.write("\n *** Reading config file... \n")
self.__read_options()
self.variables, self.intervals = self.__read_var_block()
self.start_date, self.stop_date, self.same_time4obs = self.__read_date_block()
self.models, self.experiments, self.dtypes, self.dirs = self.__read_model_block()
for k in self.dtypes:
if k.upper() not in ['CMIP5', 'JSBACH_BOT', 'JSBACH_RAW',
'CMIP3', 'JSBACH_RAW2', 'CMIP5RAW', 'CMIP5RAWSINGLE', 'JSBACH_SPECIAL']:
raise ValueError('Unknown model type: %s' % k)
# ensure that up/down fluxes are analyzed in case of albedo
# in that case the same time sampling is used!
if 'albedo' in self.variables:
if 'sis' not in self.variables:
self.variables.append('sis')
self.intervals.update({'sis': self.intervals['albedo']})
if 'surface_upward_flux' not in self.variables:
self.variables.append('surface_upward_flux')
self.intervals.update({'surface_upward_flux': self.intervals['albedo']})
sys.stdout.write(" *** Done reading config file. \n")
def get_analysis_scripts(self):
"""
        returns the names of the analysis routines for all variables
        as a dictionary.
        The names of the different analysis routines are taken from the file
        <configuration_dir>/analysis_routines.json
        which has the format
        VARIABLE,NAME OF ANALYSIS ROUTINE
"""
import json
jsonfile = self.options['configdir'] + 'analysis_routines.json'
if not os.path.exists(jsonfile):
raise ValueError('REQUIRED file analysis_routines.json not existing!')
d = json.load(open(jsonfile, 'r'))
return d
def get_methods4variables(self, variables):
"""
for a given list of variables, return a dictionary
with information on methods how to read the data
The actual information is coming from a json file
IMPORTANT: all options provided to the routines need to be
specified here and arguments must be set in calling
routine get_data()
Parameters
----------
variables : list
            list of variables to be analyzed
"""
jsonfile = self.options['configdir'] + 'model_data_routines.json'
import json
if not os.path.exists(jsonfile):
            raise ValueError('File model_data_routines.json MISSING!')
hlp = json.load(open(jsonfile, 'r'))
res = {}
for k in hlp.keys():
# only use the variables that should be analyzed!
if k in variables:
res.update({k: hlp[k]})
# ensure that for albedo processing also the routines
# for upward and downward shortwave flux are known
if 'albedo' in variables:
for k in ['surface_upward_flux', 'sis']:
if k in hlp.keys():
if k not in res.keys():
res.update({k: hlp[k]})
else:
err_msg = 'For albedo processing also the ' + k.upper() + ' routines need to be spectified!'
raise ValueError(err_msg)
# implement here also dependencies between variables for analysis
# e.g. phenology needs faPAR and snow cover fraction. Ensure here that
# snow cover is also read, even if only phenology option is set
if ('phenology_faPAR' in variables) and not ('snow' in variables):
res.update({'snow': hlp['snow']})
return res
class PlotOptions(object):
"""
Class for plot options
"""
def __init__(self):
self.options = {}
def _import_regional_file(self, region_file, varname, targetgrid=None, logfile=None):
"""
check if the regional file can be either imported or if
regions are provided as vector data. In the latter case
the regions are rasterized and results are stored in a netCDF
file
Parameters
----------
region_file : str
name of file defining the region. This is either a netCDF
file which contains the mask as different integer values
or it is a *.reg file which contains the regions as
vector data.
varname : str
name of variable in netCDF file
targetgrid : str
name of targetgrid; either 't63grid' or the name of a file
with a valid geometry
Returns
-------
region_filename, region_file_varname
"""
if not os.path.exists(region_file):
raise ValueError('ERROR: region file is not existing: ' + region_file)
ext = os.path.splitext(region_file)[1]
if ext == '.nc':
# netCDF file was given. Try to read variable
if varname is None:
raise ValueError('ERROR: no variable name given!')
try:
tmp = Data(region_file, varname, read=True)
except:
raise ValueError('ERROR: the regional masking file can not be read!')
del tmp
# everything is fine
return region_file, varname
elif ext == '.reg':
# regions were given as vector files. Read it and
# rasterize the data and store results in a temporary
# file
import tempfile
if targetgrid is None:
raise ValueError('ERROR: targetgrid needs to be specified for vectorization of regions!')
if targetgrid == 't63grid':
ls_mask = get_T63_landseamask(True, area='global', mask_antarctica=False)
else:
ls_mask = get_generic_landseamask(True, area='global', target_grid=targetgrid,
mask_antarctica=False)
# temporary netCDF filename
region_file1 = tempfile.mktemp(prefix='region_mask_', suffix='.nc')
R = RegionParser(region_file) # read region vector data
M = Raster(ls_mask.lon, ls_mask.lat)
polylist = []
if logfile is not None:
logf = open(logfile, 'w')
else:
logf = None
id = 1
for k in R.regions.keys():
reg = R.regions[k]
polylist.append(pycmbsPolygon(id, zip(reg.lon, reg.lat)))
if logf is not None: # store mapping table
logf.write(k + '\t' + str(id) + '\n')
id += 1
M.rasterize_polygons(polylist)
if logf is not None:
logf.close()
# generate dummy output file
O = Data(None, None)
O.data = M.mask
O.lat = ls_mask.lat
O.lon = ls_mask.lon
varname = 'regions'
O.save(region_file1, varname=varname, format='nc', delete=True)
            print('Region file was stored in: %s' % region_file1)
# check again that file is readable
try:
tmp = Data(region_file1, varname, read=True)
except:
print region_file1, varname
raise ValueError('ERROR: the generated region file is not readable!')
del tmp
return region_file1, varname
else:
raise ValueError('ERROR: unsupported file type')
def read(self, cfg):
"""
read plot option files and store results in a dictionary
Parameters
----------
cfg : ConfigFile instance
cfg Instance of ConfigFile class which has been already
initialized (config file has been read already)
"""
from ConfigParser import SafeConfigParser
thevariables = cfg.variables
# ensure that up/down information also given, when albedo is used
#~ if 'albedo' in thevariables:
#~ if 'surface_upward_flux' not in thevariables:
#~ thevariables.append('surface_upward_flux')
#~ if 'sis' not in thevariables:
#~ thevariables.append('sis')
for var in thevariables:
parser = SafeConfigParser()
            # The plot options are assumed to be in a file that has the same name as the variable to be analyzed
file = cfg.options['configdir'] + var + '.ini'
if os.path.exists(file):
sys.stdout.write('\n *** Reading configuration for %s: ' % var + "\n")
parser.read(file)
else:
raise ValueError('Plot option file not existing: %s' % file)
"""
            now generate a dictionary for each variable.
in each file there needs to be an OPTIONS section that specifies
the options that shall be applied to all plots for all observational datasets
The other sections specify the details for each observational dataset
"""
# print('\n*** VARIABLE: %s ***' % var)
dl = {}
# print parser.sections()
for section_name in parser.sections():
# add global plotting options
if section_name.upper() == 'OPTIONS':
o = {}
for name, value in parser.items(section_name):
o.update({name: value})
dl.update({'OPTIONS': o})
else: # observation specific dictionary
o = {}
for name, value in parser.items(section_name):
o.update({name: value})
dl.update({section_name: o})
# update options dictionary for this variable
self.options.update({var: dl})
# destroy parser (important, as otherwise problems)
del parser
# convert options to bool/numerical values
self._convert_options()
# check options consistency
self._check()
        # reset plotting options if the report should only produce a summary
if cfg.options['summary']:
# set the plot options to FALSE which are *not* relevant
# for summary report
false_vars = ['map_difference', 'map_seasons',
'reichler_plot', 'hovmoeller_plot',
'regional_analysis']
for var in thevariables:
lopt = self.options[var]
for vv in false_vars:
if vv in lopt['OPTIONS'].keys():
print 'Setting variable ', vv, ' to FALSE because of global option for ', var
lopt['OPTIONS'].update({vv: False})
# if the option is set that the observation time shall be
# the same as the models
# then overwrite options that were set in the INI files
if cfg.same_time4obs:
for var in thevariables:
lopt = self.options[var]
lopt['OPTIONS']['start'] = cfg.start_date
lopt['OPTIONS']['stop'] = cfg.stop_date
# map interpolation methods
# the interpolation method is used by the CDOs. It needs to be
# a value of [bilinear,conservative,nearest]
for var in thevariables:
lopt = self.options[var]
if lopt['OPTIONS']['interpolation'] == 'bilinear':
lopt['OPTIONS'].update({'interpolation': 'remapbil'})
elif lopt['OPTIONS']['interpolation'] == 'conservative':
lopt['OPTIONS'].update({'interpolation': 'remapcon'})
elif lopt['OPTIONS']['interpolation'] == 'nearest':
lopt['OPTIONS'].update({'interpolation': 'remapnn'})
else:
raise ValueError('ERROR: invalid interpolation method: \
%s' % lopt['OPTIONS']['interpolation'])
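    # Hedged sketch of the per-variable .ini layout that read() expects; the
    # section name and most values below are invented for illustration, while
    # the required keys mirror those enforced in _check():
    #   [OPTIONS]
    #   map_difference = True
    #   interpolation = bilinear
    #   cticks = [0.,0.2,0.4,0.6,0.8,1.]
    #   [SOME_OBSERVATION]
    #   obs_file = /path/to/observation.nc
    #   obs_var = albedo
    #   gleckler_position = 1
    #   scale_data = 1.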
def _convert_options(self):
"""
Convert options, which are only strings in the beginning
to numerical values or execute functions to set directory
names appropriately
"""
for v in self.options.keys():
var = self.options[v]
for s in var.keys(): # each section
sec = var[s]
for k in sec.keys():
if k == 'start':
sec.update({k: pl.num2date(pl.datestr2num(sec[k]))})
elif k == 'stop':
sec.update({k: pl.num2date(pl.datestr2num(sec[k]))})
else:
# update current variable with valid value
sec.update({k: self.__convert(sec[k])})
def __convert(self, s):
"""
convert a single string into a valid value. The routine
recognizes if the string is numerical, or boolean
or if a command needs to be executed to generate a valid value
Parameters
----------
s : str
string with value of option
"""
        s = s.replace('\n', '')  # str.replace returns a new string; reassign to actually drop newlines
if len(s) == 0:
return None
#1) check for numerical value
try:
x = float(s)
return x
except:
pass
#2) check for boolean value
h = s.lstrip().rstrip()
if h.upper() == 'TRUE':
return True
if h.upper() == 'FALSE':
return False
#3) check if some code should be executed (specified by starting and trailing #)
if (h[0] == '#') and (h[-1] == '#'):
cdo = Cdo()
cmd = h[1:-1]
exec('res = ' + cmd)
return res
#4) check if a list is provided
if (h[0] == '[') and (h[-1] == ']'): # is list
return self.__get_list(s)
#5) in any other case return s
return s
def __get_list(self, s):
# return a list made out from a string '[1,2,3,4,5]'
l = s.replace('[', '')
l = l.replace(']', '')
l = l.split(',')
o = []
for i in l:
try:
o.append(float(i)) # try numerical conversion
except:
# else: return the string itself
o.append(i)
return o
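    # Hedged illustration of how __convert() dispatches on the raw option
    # string (example values are hypothetical, not from a real config file):
    #   '0.5'       -> 0.5                 (numerical)
    #   'True'      -> True                (boolean)
    #   '[1,2,foo]' -> [1.0, 2.0, 'foo']   (list, via __get_list)
    #   '#CMD#'     -> result of exec'ing 'res = CMD'
    #   'any text'  -> 'any text'          (fallback: unchanged string)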
def _check(self):
"""
check consistency of options specified
"""
cerr = 0
o = self.options
# specify here the options that need to be given!
# variables that need to be specified (MUST!) for each
# observational dataset
locopt = ['obs_file', 'obs_var', 'gleckler_position',
'scale_data']
globopt = ['cticks', 'map_difference', 'map_seasons',
'preprocess', 'reichler_plot', 'gleckler_plot',
'hovmoeller_plot', 'regional_analysis',
'interpolation', 'targetgrid', 'projection',
'global_mean', 'vmin', 'vmax', 'dmin', 'dmax',
'cmin', 'cmax', 'pattern_correlation'] # options for each variable type
# all variables
for v in o.keys():
d = o[v] # dictionary for a specific variable
if not 'OPTIONS' in d.keys():
sys.stdout.write('Error: missing OPTIONS %s' % v)
cerr += 1
# check global options
for k in globopt:
if not k in d['OPTIONS'].keys():
sys.stdout.write('Error: missing global option: %s (%s)' % (k, v))
cerr += 1
if k == 'cticks':
if isinstance(d['OPTIONS'][k], list):
if np.any(np.diff(d['OPTIONS'][k]) < 0):
raise ValueError('CTICKS are not in \
increasing order!')
else:
raise ValueError('CTICKS option needs to \
be a list')
if k == 'regional_analysis':
if d['OPTIONS'][k] is True:
if 'region_file' not in d['OPTIONS'].keys():
raise ValueError('ERROR: You need to provide a region file name if '
'you want to use regional_analysis!')
# check local options
# odat is key for a specific observational dataset
for odat in d.keys():
if odat.upper() == 'OPTIONS':
continue
            # k runs over the options required for each specific obs. record
for k in locopt:
if not k in d[odat].keys():
sys.stdout.write('Error: missing local option: %s (%s,%s)' % (k, odat, v))
cerr += 1
if k == 'obs_file':
d[odat]['obs_file'] = d[odat]['obs_file'].rstrip()
if ((d[odat]['obs_file'][-1] == os.sep) or (d[odat]['obs_file'][-3:] == '.nc') or (d[odat]['obs_file'][-4:] == '.nc4')):
pass
else:
d[odat]['obs_file'] = d[odat]['obs_file'] + os.sep
# check if region file is given
if 'region_file' in d['OPTIONS'].keys():
if d['OPTIONS']['region_file'].lower() == 'none':
pass
elif len(d['OPTIONS']['region_file']) == 0:
pass
else:
if not os.path.exists(d['OPTIONS']['region_file']):
print d['OPTIONS']['region_file']
raise ValueError('Regional masking file not existing: %s' % d['OPTIONS']['region_file'])
else:
region_filename, region_file_varname = self._import_regional_file(d['OPTIONS']['region_file'], d['OPTIONS'].pop('region_file_varname', None), d['OPTIONS']['targetgrid'])
self.options[v]['OPTIONS'].update({'region_file': region_filename})
self.options[v]['OPTIONS'].update({'region_file_varname': region_file_varname})
if cerr > 0:
raise ValueError('There were errors in the initialization \
of plotting options!')
class CFGWriter(object):
"""
This class is supposed to provide a standardized interface to write
a pyCMBS configuration file
"""
def __init__(self, filename, generator='pyCMBS CONFIGURATION WRITER'):
"""
Parameters
---------
filename : str
filename of configuration file that shall be generated
generator : str
identifier of the caller of the class
"""
self.generator = generator
self.filename = filename
self.output_dir = os.path.dirname(filename)
if os.path.exists(filename):
os.remove(self.filename)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def save(self, temp_dir=None, vars=None, start_date=None,
stop_date=None, models=None, interval='monthly',
format='png', basemap=False, clean_temp=False,
summary_only=False):
"""
save configuration file
"""
# list which specifies which default variables should be written
# to the standard configuration file
supported_vars = ['albedo', 'sis', 'albedo_vis',
'albedo_nir', 'surface_upward_flux', 'tree',
'temperature', 'rain', 'evap', 'hair', 'wind',
'twpa', 'wvpa', 'late', 'budg',
'gpp']
# 'phenology_faPAR'
if format.lower() not in ['pdf', 'png']:
raise ValueError('Invalid output format for report: %s' % format)
if interval not in ['monthly', 'season']:
raise ValueError('Invalid interval option specified!')
if temp_dir is None:
raise ValueError('No temporary output directory specified!')
if vars is None:
raise ValueError('No VARIABLES specified!')
if start_date is None:
raise ValueError('No start_date specified')
if stop_date is None:
raise ValueError('No stop_date specified')
if models is None:
raise ValueError('No models specified!')
import time
self._write('######################################################')
self._write('# AUTOMATICALLY GENERATED configuration file for pyCMBS')
self._write('# https://code.zmaw.de/projects/pycmbs')
self._write('# generated by ' + self.generator)
self._write('#')
self._write('# generated at: ' + time.asctime())
self._write('######################################################')
self._write('basemap,' + str(basemap.real))
self._write('report=reportname_here')
self._write('report_format=' + format.upper())
self._write('author=TheAuthorName')
self._write('temp_dir=' + temp_dir)
self._write('clean_tempdir,' + str(clean_temp.real))
self._write('summary_only,' + str(summary_only.real))
self._write('config_dir=' + self.output_dir + '/configuration/')
if not os.path.exists(self.output_dir + '/configuration/'):
os.makedirs(self.output_dir + '/configuration/')
self._write('output_directory=' + self.output_dir + '/reports/')
self._write('')
self._write('################################')
self._write('# Specify variables to analyze')
self._write('#')
self._write("# comments are by '#'")
self._write('#')
self._write('# analysis details for each variable are:')
self._write('# name, [0,1], [monthly,season]')
self._write('#')
self._write("# 'name' specifies the variable name to be analyzed; needs to be consistent with routines defined"
" in main()")
self._write('# [0,1] specified if the data shall be used')
self._write('# [monthly,season] specifies the temporal scale of the analysis')
self._write('#')
self._write('################################')
for v in supported_vars:
if v in vars:
self._write(v + ',1,' + interval)
else:
self._write(v + ',0,' + interval)
self._write('')
self._write('################################')
self._write('# specify period to analyze')
self._write('# start-time YYYY-MM-DD')
self._write('# stop-time YYYY-MM-DD')
self._write('################################')
self._write(start_date)
self._write(stop_date)
self._write('use_for_observations,0')
self._write('')
self._write('################################')
self._write('# Register models to analyze')
        self._write('# ID,TYPE,EXPERIMENT,PATH')
self._write('#')
self._write('# ID: unique ID to specify model, for CMIP5 ID is also part of the filenames!')
        self._write('# TYPE: Type of model to be analyzed (JSBACH_BOT, CMIP5, JSBACH_RAW)')
self._write('# EXPERIMENT: an experiment identifier')
self._write('# PATH: directory path where data is located')
self._write('#')
        self._write('# The model entries MUST NOT be separated with whitespaces at the moment!')
self._write('################################')
self._write('')
self._write('#--- MODELS TO ANALYZE ---')
for i in xrange(len(models)):
self._write(models[i]['id'] + ',' + models[i]['type']
+ ',' + models[i]['experiment'] + ','
+ models[i]['path'])
def _write(self, s):
if os.path.exists(self.filename):
mode = 'a'
else:
mode = 'w'
f = open(self.filename, mode)
f.write(s + '\n')
f.close()
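# A minimal, hypothetical usage sketch of CFGWriter (the paths and the model
# entry below are invented for illustration; they are not part of pyCMBS):
if __name__ == '__main__':
    demo_models = [{'id': 'MPI-ESM-LR', 'type': 'CMIP5',
                    'experiment': 'historical', 'path': '/data/cmip5/'}]
    writer = CFGWriter('/tmp/pycmbs_demo/demo.cfg', generator='demo script')
    writer.save(temp_dir='/tmp/pycmbs_demo/tmp/', vars=['albedo', 'sis'],
                start_date='2000-01-01', stop_date='2005-12-31',
                models=demo_models, interval='monthly')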
|
pygeo/pycmbs
|
pycmbs/benchmarking/config.py
|
Python
|
mit
| 31,798
|
[
"NetCDF"
] |
29fcac88d30c3c22c68c6e0072908e6b9fc912b58fe89a469f1a5ff2843ee72a
|
# $HeadURL$
__RCSID__ = "$Id$"
import os.path
import zlib
import zipfile
import threading, thread
import time
import DIRAC
from DIRAC.Core.Utilities import List, Time
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.FrameworkSystem.Client.Logger import gLogger
class ConfigurationData:
def __init__( self, loadDefaultCFG = True ):
lr = LockRing()
self.threadingEvent = lr.getEvent()
self.threadingEvent.set()
self.threadingLock = lr.getLock()
self.runningThreadsNumber = 0
self.compressedConfigurationData = ""
self.configurationPath = "/DIRAC/Configuration"
self.backupsDir = os.path.join( DIRAC.rootPath, "etc", "csbackup" )
self._isService = False
self.localCFG = CFG()
self.remoteCFG = CFG()
self.mergedCFG = CFG()
self.remoteServerList = []
if loadDefaultCFG:
defaultCFGFile = os.path.join( DIRAC.rootPath, "etc", "dirac.cfg" )
gLogger.debug( "dirac.cfg should be at", "%s" % defaultCFGFile )
retVal = self.loadFile( defaultCFGFile )
if not retVal[ 'OK' ]:
gLogger.warn( "Can't load %s file" % defaultCFGFile )
self.sync()
def getBackupDir( self ):
return self.backupsDir
def sync( self ):
gLogger.debug( "Updating configuration internals" )
self.mergedCFG = self.remoteCFG.mergeWith( self.localCFG )
self.remoteServerList = []
localServers = self.extractOptionFromCFG( "%s/Servers" % self.configurationPath,
self.localCFG,
disableDangerZones = True )
if localServers:
self.remoteServerList.extend( List.fromChar( localServers, "," ) )
remoteServers = self.extractOptionFromCFG( "%s/Servers" % self.configurationPath,
self.remoteCFG,
disableDangerZones = True )
if remoteServers:
self.remoteServerList.extend( List.fromChar( remoteServers, "," ) )
self.remoteServerList = List.uniqueElements( self.remoteServerList )
self.compressedConfigurationData = zlib.compress( str( self.remoteCFG ), 9 )
def loadFile( self, fileName ):
try:
fileCFG = CFG()
fileCFG.loadFromFile( fileName )
    except IOError:
      # nothing could be loaded, so there is nothing to merge; just report it
      return S_ERROR( "Can't load a cfg file '%s'" % fileName )
return self.mergeWithLocal( fileCFG )
def mergeWithLocal( self, extraCFG ):
self.lock()
try:
self.localCFG = self.localCFG.mergeWith( extraCFG )
self.unlock()
gLogger.debug( "CFG merged" )
except Exception, e:
self.unlock()
return S_ERROR( "Cannot merge with new cfg: %s" % str( e ) )
self.sync()
return S_OK()
def loadRemoteCFGFromCompressedMem( self, data ):
sUncompressedData = zlib.decompress( data )
self.loadRemoteCFGFromMem( sUncompressedData )
def loadRemoteCFGFromMem( self, data ):
self.lock()
self.remoteCFG.loadFromBuffer( data )
self.unlock()
self.sync()
def loadConfigurationData( self, fileName = False ):
name = self.getName()
self.lock()
try:
if not fileName:
fileName = "%s.cfg" % name
if fileName[0] != "/":
fileName = os.path.join( DIRAC.rootPath, "etc", fileName )
self.remoteCFG.loadFromFile( fileName )
except Exception, e:
print e
pass
self.unlock()
self.sync()
def getCommentFromCFG( self, path, cfg = False ):
if not cfg:
cfg = self.mergedCFG
self.dangerZoneStart()
try:
levelList = [ level.strip() for level in path.split( "/" ) if level.strip() != "" ]
for section in levelList[:-1]:
cfg = cfg[ section ]
return self.dangerZoneEnd( cfg.getComment( levelList[-1] ) )
except Exception:
pass
return self.dangerZoneEnd( None )
def getSectionsFromCFG( self, path, cfg = False, ordered = False ):
if not cfg:
cfg = self.mergedCFG
self.dangerZoneStart()
try:
levelList = [ level.strip() for level in path.split( "/" ) if level.strip() != "" ]
for section in levelList:
cfg = cfg[ section ]
return self.dangerZoneEnd( cfg.listSections( ordered ) )
except Exception:
pass
return self.dangerZoneEnd( None )
def getOptionsFromCFG( self, path, cfg = False, ordered = False ):
if not cfg:
cfg = self.mergedCFG
self.dangerZoneStart()
try:
levelList = [ level.strip() for level in path.split( "/" ) if level.strip() != "" ]
for section in levelList:
cfg = cfg[ section ]
return self.dangerZoneEnd( cfg.listOptions( ordered ) )
except Exception:
pass
return self.dangerZoneEnd( None )
def extractOptionFromCFG( self, path, cfg = False, disableDangerZones = False ):
if not cfg:
cfg = self.mergedCFG
if not disableDangerZones:
self.dangerZoneStart()
try:
levelList = [ level.strip() for level in path.split( "/" ) if level.strip() != "" ]
for section in levelList[:-1]:
cfg = cfg[ section ]
if levelList[-1] in cfg.listOptions():
return self.dangerZoneEnd( cfg[ levelList[ -1 ] ] )
except Exception:
pass
if not disableDangerZones:
self.dangerZoneEnd()
def setOptionInCFG( self, path, value, cfg = False, disableDangerZones = False ):
if not cfg:
cfg = self.localCFG
if not disableDangerZones:
self.dangerZoneStart()
try:
levelList = [ level.strip() for level in path.split( "/" ) if level.strip() != "" ]
for section in levelList[:-1]:
if section not in cfg.listSections():
cfg.createNewSection( section )
cfg = cfg[ section ]
cfg.setOption( levelList[ -1 ], value )
finally:
if not disableDangerZones:
self.dangerZoneEnd()
self.sync()
def deleteOptionInCFG( self, path, cfg = False ):
if not cfg:
cfg = self.localCFG
self.dangerZoneStart()
try:
levelList = [ level.strip() for level in path.split( "/" ) if level.strip() != "" ]
for section in levelList[:-1]:
if section not in cfg.listSections():
return
cfg = cfg[ section ]
cfg.deleteKey( levelList[ -1 ] )
finally:
self.dangerZoneEnd()
self.sync()
def generateNewVersion( self ):
self.setVersion( Time.toString() )
self.sync()
gLogger.info( "Generated new version %s" % self.getVersion() )
def setVersion( self, version, cfg = False ):
if not cfg:
cfg = self.remoteCFG
self.setOptionInCFG( "%s/Version" % self.configurationPath,
version,
cfg )
def getVersion( self, cfg = False ):
if not cfg:
cfg = self.remoteCFG
value = self.extractOptionFromCFG( "%s/Version" % self.configurationPath,
cfg )
if value:
return value
return "0"
def getName( self ):
return self.extractOptionFromCFG( "%s/Name" % self.configurationPath,
self.mergedCFG )
def exportName( self ):
return self.setOptionInCFG( "%s/Name" % self.configurationPath,
self.getName(),
self.remoteCFG )
def getRefreshTime( self ):
try:
return int( self.extractOptionFromCFG( "%s/RefreshTime" % self.configurationPath,
self.mergedCFG ) )
except:
return 300
def getPropagationTime( self ):
try:
return int( self.extractOptionFromCFG( "%s/PropagationTime" % self.configurationPath,
self.mergedCFG ) )
except:
return 300
def getSlavesGraceTime( self ):
try:
return int( self.extractOptionFromCFG( "%s/SlavesGraceTime" % self.configurationPath,
self.mergedCFG ) )
except:
return 600
def mergingEnabled( self ):
try:
val = self.extractOptionFromCFG( "%s/EnableAutoMerge" % self.configurationPath,
self.mergedCFG )
return val.lower() in ( "yes", "true", "y" )
except:
return False
def getAutoPublish( self ):
value = self.extractOptionFromCFG( "%s/AutoPublish" % self.configurationPath,
self.localCFG )
if value and value.lower() in ( "no", "false", "n" ):
return False
else:
return True
def getServers( self ):
return list( self.remoteServerList )
def getConfigurationGateway( self ):
return self.extractOptionFromCFG( "/DIRAC/Gateway",
self.localCFG )
def setServers( self, sServers ):
self.setOptionInCFG( "%s/Servers" % self.configurationPath,
sServers,
self.remoteCFG )
self.sync()
def deleteLocalOption( self, optionPath ):
self.deleteOptionInCFG( optionPath, self.localCFG )
def getMasterServer( self ):
return self.extractOptionFromCFG( "%s/MasterServer" % self.configurationPath,
self.remoteCFG )
def setMasterServer( self, sURL ):
self.setOptionInCFG( "%s/MasterServer" % self.configurationPath,
sURL,
self.remoteCFG )
self.sync()
def getCompressedData( self ):
return self.compressedConfigurationData
def isMaster( self ):
value = self.extractOptionFromCFG( "%s/Master" % self.configurationPath,
self.localCFG )
if value and value.lower() in ( "yes", "true", "y" ):
return True
else:
return False
def getServicesPath( self ):
return "/Services"
def setAsService( self ):
self._isService = True
def isService( self ):
return self._isService
def useServerCertificate( self ):
value = self.extractOptionFromCFG( "/DIRAC/Security/UseServerCertificate" )
if value and value.lower() in ( "y", "yes", "true" ):
return True
return False
def skipCACheck( self ):
value = self.extractOptionFromCFG( "/DIRAC/Security/SkipCAChecks" )
if value and value.lower() in ( "y", "yes", "true" ):
return True
return False
def dumpLocalCFGToFile( self, fileName ):
try:
fd = open( fileName, "w" )
fd.write( str( self.localCFG ) )
fd.close()
gLogger.verbose( "Configuration file dumped", "'%s'" % fileName )
except IOError:
gLogger.error( "Can't dump cfg file", "'%s'" % fileName )
return S_ERROR( "Can't dump cfg file '%s'" % fileName )
return S_OK()
def getRemoteCFG( self ):
return self.remoteCFG
def getMergedCFGAsString( self ):
return str( self.mergedCFG )
def dumpRemoteCFGToFile( self, fileName ):
fd = open( fileName, "w" )
fd.write( str( self.remoteCFG ) )
fd.close()
def __backupCurrentConfiguration( self, backupName ):
configurationFilename = "%s.cfg" % self.getName()
configurationFile = os.path.join( DIRAC.rootPath, "etc", configurationFilename )
today = Time.date()
backupPath = os.path.join( self.getBackupDir(), str( today.year ), "%02d" % today.month )
try:
os.makedirs( backupPath )
except:
pass
backupFile = os.path.join( backupPath, configurationFilename.replace( ".cfg", ".%s.zip" % backupName ) )
if os.path.isfile( configurationFile ):
gLogger.info( "Making a backup of configuration in %s" % backupFile )
try:
zf = zipfile.ZipFile( backupFile, "w", zipfile.ZIP_DEFLATED )
zf.write( configurationFile, "%s.backup.%s" % ( os.path.split( configurationFile )[1], backupName ) )
zf.close()
except Exception:
gLogger.exception()
gLogger.error( "Cannot backup configuration data file",
"file %s" % backupFile )
else:
gLogger.warn( "CS data file does not exist", configurationFile )
def writeRemoteConfigurationToDisk( self, backupName = False ):
configurationFile = os.path.join( DIRAC.rootPath, "etc", "%s.cfg" % self.getName() )
try:
fd = open( configurationFile, "w" )
fd.write( str( self.remoteCFG ) )
fd.close()
except Exception, e:
gLogger.fatal( "Cannot write new configuration to disk!",
"file %s" % configurationFile )
return S_ERROR( "Can't write cs file %s!: %s" % ( configurationFile, str( e ) ) )
if backupName:
self.__backupCurrentConfiguration( backupName )
return S_OK()
def setRemoteCFG( self, cfg, disableSync = False ):
self.remoteCFG = cfg.clone()
if not disableSync:
self.sync()
def lock( self ):
"""
Locks Event to prevent further threads from reading.
Stops current thread until no other thread is accessing.
PRIVATE USE
"""
self.threadingEvent.clear()
while self.runningThreadsNumber > 0:
time.sleep( 0.1 )
def unlock( self ):
"""
Unlocks Event.
PRIVATE USE
"""
self.threadingEvent.set()
def dangerZoneStart( self ):
"""
Start of danger zone. This danger zone may be or may not be a mutual exclusion zone.
Counter is maintained to know how many threads are inside and be able to enable and disable mutual exclusion.
PRIVATE USE
"""
self.threadingEvent.wait()
self.threadingLock.acquire()
self.runningThreadsNumber += 1
try:
self.threadingLock.release()
except thread.error:
pass
def dangerZoneEnd( self, returnValue = None ):
"""
End of danger zone.
PRIVATE USE
"""
self.threadingLock.acquire()
self.runningThreadsNumber -= 1
try:
self.threadingLock.release()
except thread.error:
pass
return returnValue
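# Hedged, minimal usage sketch (not part of the original module): a local
# set/extract round trip that avoids reading any dirac.cfg from disk by
# passing loadDefaultCFG = False. The option path and value are illustrative.
if __name__ == "__main__":
  cfgData = ConfigurationData( loadDefaultCFG = False )
  cfgData.setOptionInCFG( "/DIRAC/Setup", "Demo-Setup" )
  print cfgData.extractOptionFromCFG( "/DIRAC/Setup" )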
|
Sbalbp/DIRAC
|
ConfigurationSystem/private/ConfigurationData.py
|
Python
|
gpl-3.0
| 13,900
|
[
"DIRAC"
] |
fbd2adf6ec798ed87e21fa70f29cd36e0b5b071f6e5e894c4174f4451d067960
|
"""
This example displays the trajectories for the Lorenz system of
equations using mlab along with the z-nullcline. It provides a simple
UI where a user can change the parameters and the system of equations on
the fly. This primarily demonstrates how one can build powerful tools
with a UI using Traits and Mayavi.
For explanations and more examples of interactive application building
with Mayavi, please refer to section :ref:`building_applications`.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008-2009, Enthought, Inc.
# License: BSD Style.
import numpy as np
import scipy
from traits.api import HasTraits, Range, Instance, \
on_trait_change, Array, Tuple, Str
from traitsui.api import View, Item, HSplit, Group
from mayavi import mlab
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, \
SceneEditor
################################################################################
# `Lorenz` class.
################################################################################
class Lorenz(HasTraits):
# The parameters for the Lorenz system, defaults to the standard ones.
s = Range(0.0, 20.0, 10.0, desc='the parameter s', enter_set=True,
auto_set=False)
r = Range(0.0, 50.0, 28.0, desc='the parameter r', enter_set=True,
auto_set=False)
b = Range(0.0, 10.0, 8./3., desc='the parameter b', enter_set=True,
auto_set=False)
# These expressions are evaluated to compute the right hand sides of
# the ODE. Defaults to the Lorenz system.
u = Str('s*(y-x)', desc='the x component of the velocity',
auto_set=False, enter_set=True)
v = Str('r*x - y - x*z', desc='the y component of the velocity',
auto_set=False, enter_set=True)
w = Str('x*y - b*z', desc='the z component of the velocity',
auto_set=False, enter_set=True)
# Tuple of x, y, z arrays where the field is sampled.
points = Tuple(Array, Array, Array)
# The mayavi(mlab) scene.
scene = Instance(MlabSceneModel, args=())
# The "flow" which is a Mayavi streamline module.
flow = Instance(HasTraits)
########################################
# The UI view to show the user.
view = View(HSplit(
Group(
Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=500, width=500, show_label=False)),
Group(
Item('s'),
Item('r'),
Item('b'),
Item('u'), Item('v'), Item('w')),
),
resizable=True
)
######################################################################
# Trait handlers.
######################################################################
# Note that in the `on_trait_change` call below we listen for the
# `scene.activated` trait. This conveniently ensures that the flow
# is generated as soon as the mlab `scene` is activated (which
# happens when the configure/edit_traits method is called). This
# eliminates the need to manually call the `update_flow` method etc.
@on_trait_change('s, r, b, scene.activated')
def update_flow(self):
x, y, z = self.points
u, v, w = self.get_uvw()
self.flow.mlab_source.set(u=u, v=v, w=w)
@on_trait_change('u')
def update_u(self):
self.flow.mlab_source.set(u=self.get_vel('u'))
@on_trait_change('v')
def update_v(self):
self.flow.mlab_source.set(v=self.get_vel('v'))
@on_trait_change('w')
def update_w(self):
self.flow.mlab_source.set(w=self.get_vel('w'))
def get_uvw(self):
return self.get_vel('u'), self.get_vel('v'), self.get_vel('w')
def get_vel(self, comp):
"""This function basically evaluates the user specified system
of equations using scipy.
"""
func_str = getattr(self, comp)
try:
g = scipy.__dict__
x, y, z = self.points
s, r, b = self.s, self.r, self.b
val = eval(func_str, g,
{'x': x, 'y': y, 'z': z,
's':s, 'r':r, 'b': b})
except:
# Mistake, so return the original value.
val = getattr(self.flow.mlab_source, comp)
return val
######################################################################
# Private interface.
######################################################################
def _points_default(self):
x, y, z = np.mgrid[-50:50:100j,-50:50:100j,-10:60:70j]
return x, y, z
def _flow_default(self):
x, y, z = self.points
u, v, w = self.get_uvw()
f = self.scene.mlab.flow(x, y, z, u, v, w)
f.stream_tracer.integration_direction = 'both'
f.stream_tracer.maximum_propagation = 200
src = f.mlab_source.m_data
o = mlab.outline()
mlab.view(120, 60, 150)
return f
if __name__ == '__main__':
# Instantiate the class and configure its traits.
lor = Lorenz()
lor.configure_traits()
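    # Hedged note (not in the original example): the traits can also be set
    # programmatically; each assignment fires the matching @on_trait_change
    # handler defined above, e.g.
    #   lor.s, lor.r, lor.b = 14.0, 28.0, 8. / 3.
    #   lor.u = 'y - x'   # swap in a different right-hand side for dx/dt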
|
skulumani/asteroid_dumbbell
|
integration/lorenz_ui.py
|
Python
|
gpl-3.0
| 5,207
|
[
"Mayavi"
] |
5b54ed7d5f9c70e21962634c64ada6855432ba7fa7aa538652c12732e087c25f
|
# coding=utf-8
from __future__ import division
import math
import os
from copy import deepcopy
from ladybug_geometry.geometry3d.pointvector import Vector3D
from .analysisperiod import AnalysisPeriod
from .datacollection import HourlyContinuousCollection, HourlyDiscontinuousCollection
from .datatype.energyflux import Irradiance, GlobalHorizontalIrradiance, \
DirectNormalIrradiance, DiffuseHorizontalIrradiance, DirectHorizontalIrradiance
from .datatype.illuminance import GlobalHorizontalIlluminance, \
DirectNormalIlluminance, DiffuseHorizontalIlluminance
from .datatype.luminance import ZenithLuminance
from .dt import DateTime, Time
from .epw import EPW
from .futil import write_to_file
from .header import Header
from .location import Location
from .skymodel import ashrae_revised_clear_sky, ashrae_clear_sky, \
zhang_huang_solar_split, estimate_illuminance_from_irradiance
from .stat import STAT
from .sunpath import Sunpath
try: # python 2
from itertools import izip as zip
readmode = 'rb'
writemode = 'wb'
except ImportError: # python 3
xrange = range
readmode = 'r'
writemode = 'w'
class Wea(object):
"""A WEA object containing hourly or sub-hourly solar irradiance.
This object and its corresponding .wea file type is what the Radiance gendaymtx
function uses to generate the sky.
Args:
location: Ladybug location object.
direct_normal_irradiance: A HourlyContinuousCollection or a
HourlyDiscontinuousCollection for direct normal irradiance. The
collection must be aligned with the diffuse_horizontal_irradiance.
diffuse_horizontal_irradiance: A HourlyContinuousCollection or a
            HourlyDiscontinuousCollection for diffuse horizontal irradiance. The
collection must be aligned with the direct_normal_irradiance.
Properties:
* location
* direct_normal_irradiance
* diffuse_horizontal_irradiance
* direct_horizontal_irradiance
* global_horizontal_irradiance
* enforce_on_hour
* datetimes
* hoys
* analysis_period
* timestep
* is_leap_year
* is_continuous
* is_annual
* header
"""
__slots__ = \
('_timestep', '_is_leap_year', '_location', 'metadata', '_enforce_on_hour',
'_direct_normal_irradiance', '_diffuse_horizontal_irradiance')
def __init__(self, location, direct_normal_irradiance, diffuse_horizontal_irradiance):
"""Create a Wea object."""
# Check that input collections are of the right type and aligned to each other
acceptable_colls = (HourlyContinuousCollection, HourlyDiscontinuousCollection)
for coll in (direct_normal_irradiance, diffuse_horizontal_irradiance):
assert isinstance(coll, acceptable_colls), 'Input irradiance data for ' \
'Wea must be an hourly data collection. Got {}.'.format(type(coll))
assert direct_normal_irradiance.is_collection_aligned(
diffuse_horizontal_irradiance), 'Wea direct normal and diffuse horizontal ' \
'irradiance collections must be aligned with one another.'
# assign the location, irradiance, metadata, timestep and leap year
self._enforce_on_hour = False # False by default
self.location = location
self._direct_normal_irradiance = direct_normal_irradiance
self._diffuse_horizontal_irradiance = diffuse_horizontal_irradiance
self.metadata = {'source': location.source, 'country': location.country,
'city': location.city}
self._timestep = direct_normal_irradiance.header.analysis_period.timestep
self._is_leap_year = direct_normal_irradiance.header.analysis_period.is_leap_year
@classmethod
def from_annual_values(cls, location, direct_normal_irradiance,
diffuse_horizontal_irradiance, timestep=1, is_leap_year=False):
"""Create an annual Wea from an array of irradiance values.
Args:
location: Ladybug location object.
direct_normal_irradiance: An array of values for direct normal irradiance.
The length of this list should be same as diffuse_horizontal_irradiance
and should represent an entire year of values at the input timestep.
            diffuse_horizontal_irradiance: An array of values for diffuse
                horizontal irradiance. The length of this list should be the
                same as direct_normal_irradiance.
timestep: An integer to set the number of time steps per hour.
Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are for a leap
year. (Default: False).
"""
metadata = {'source': location.source, 'country': location.country,
'city': location.city}
dnr, dhr = cls._get_data_collections(
direct_normal_irradiance, diffuse_horizontal_irradiance,
metadata, timestep, is_leap_year)
return cls(location, dnr, dhr)
@classmethod
def from_dict(cls, data):
""" Create Wea from a dictionary
Args:
data: A python dictionary in the following format
.. code-block:: python
{
"type": "Wea",
"location": {}, # ladybug location dictionary
"direct_normal_irradiance": [], # direct normal irradiance values
"diffuse_horizontal_irradiance": [], # diffuse horizontal irradiance values
"timestep": 1, # optional timestep between measurements
"is_leap_year": False, # optional boolean for leap year
"datetimes": [] # array of datetime arrays; only required when not annual
}
"""
# check for the required keys
required_keys = ('type', 'location', 'direct_normal_irradiance',
'diffuse_horizontal_irradiance')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
assert data['type'] == 'Wea', \
'Expected Wea dictionary. Got {}.'.format(data['type'])
# set the optional properties
timestep = data['timestep'] if 'timestep' in data else 1
is_leap_year = data['is_leap_year'] if 'is_leap_year' in data else False
# correctly interpret the datetimes to create correct analysis periods
continuous = True
if 'datetimes' in data and data['datetimes'] is not None:
st_dt = DateTime.from_array(data['datetimes'][0])
end_dt = DateTime.from_array(data['datetimes'][-1])
if st_dt.leap_year is not is_leap_year:
                # pass leap_year by keyword so it is not consumed as the minute argument
                st_dt = DateTime(st_dt.month, st_dt.day, st_dt.hour,
                                 leap_year=is_leap_year)
                end_dt = DateTime(end_dt.month, end_dt.day, end_dt.hour,
                                  leap_year=is_leap_year)
a_per = AnalysisPeriod.from_start_end_datetime(st_dt, end_dt, timestep)
if a_per.st_hour != 0 or a_per.end_hour != 23:
continuous = False
if len(a_per) != len(data['direct_normal_irradiance']):
a_per = AnalysisPeriod(timestep=timestep, is_leap_year=is_leap_year)
continuous = False
else: # assume it is annual continuous data
a_per = AnalysisPeriod(timestep=timestep, is_leap_year=is_leap_year)
# serialize the location and data collections
location = Location.from_dict(data['location'])
dni_head = Header(DirectNormalIrradiance(), 'W/m2', a_per)
dhi_head = Header(DiffuseHorizontalIrradiance(), 'W/m2', a_per)
if continuous:
dni = HourlyContinuousCollection(
dni_head, data['direct_normal_irradiance'])
dhi = HourlyContinuousCollection(
dhi_head, data['diffuse_horizontal_irradiance'])
else:
datetimes = [DateTime.from_array(dat) for dat in data['datetimes']]
dni = HourlyDiscontinuousCollection(
dni_head, data['direct_normal_irradiance'], datetimes)
dhi = HourlyDiscontinuousCollection(
dhi_head, data['diffuse_horizontal_irradiance'], datetimes)
return cls(location, dni, dhi)
@classmethod
def from_file(cls, wea_file, timestep=1, is_leap_year=False):
"""Create Wea object from a .wea file.
Args:
            wea_file: Full path to .wea file.
timestep: An optional integer to set the number of time steps per hour.
Default is 1 for one value per hour. If the wea file has a time step
smaller than an hour, adjust this input accordingly.
is_leap_year: A boolean to indicate if values are for a leap
year. (Default: False).
"""
assert os.path.isfile(wea_file), 'Failed to find {}'.format(wea_file)
with open(wea_file, readmode) as weaf:
location = cls._parse_wea_header(weaf, wea_file)
# parse irradiance values
dir_norm_irr = []
dif_horiz_irr = []
dt_arr = []
for line in weaf:
vals = line.split()
dir_norm_irr.append(float(vals[-2]))
dif_horiz_irr.append(float(vals[-1]))
dt_arr.append([int(vals[0]), int(vals[1]), float(vals[2])])
# interpret datetimes to create data collections with correct analysis periods
continuous = True
st_dt = DateTime.from_array([dt_arr[0][0], dt_arr[0][1], int(dt_arr[0][2])])
end_dt = DateTime.from_array([dt_arr[-1][0], dt_arr[-1][1], int(dt_arr[-1][2])])
if st_dt.leap_year is not is_leap_year:
            # pass leap_year by keyword so it is not consumed as the minute argument
            st_dt = DateTime(st_dt.month, st_dt.day, st_dt.hour,
                             leap_year=is_leap_year)
            end_dt = DateTime(end_dt.month, end_dt.day, end_dt.hour,
                              leap_year=is_leap_year)
a_per = AnalysisPeriod.from_start_end_datetime(st_dt, end_dt, timestep)
        if a_per.st_hour != 0 or a_per.end_hour != 23:  # partial-day slice -> discontinuous
continuous = False
if len(a_per) != len(dir_norm_irr): # true discontinuous data
a_per = AnalysisPeriod(timestep=timestep, is_leap_year=is_leap_year)
continuous = False
# serialize the data collections
metadata = {'city': location.city}
dni_head = Header(DirectNormalIrradiance(), 'W/m2', a_per, metadata)
dhi_head = Header(DiffuseHorizontalIrradiance(), 'W/m2', a_per, metadata)
if continuous:
dni = HourlyContinuousCollection(dni_head, dir_norm_irr)
dhi = HourlyContinuousCollection(dhi_head, dif_horiz_irr)
else:
if timestep == 1:
datetimes = [DateTime(d[0], d[1], int(d[2])) for d in dt_arr]
else:
datetimes = []
for d in dt_arr:
tim = Time.from_mod(int(d[2] * 60))
datetimes.append(DateTime(d[0], d[1], tim.hour, tim.minute))
dni = HourlyDiscontinuousCollection(dni_head, dir_norm_irr, datetimes)
dhi = HourlyDiscontinuousCollection(dhi_head, dif_horiz_irr, datetimes)
dni = dni.validate_analysis_period()
dhi = dhi.validate_analysis_period()
return cls(location, dni, dhi)
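    # Hedged sketch of the .wea layout that from_file() parses; the header
    # fields follow the `header` property further below, and the numbers here
    # are invented for illustration:
    #   place Denver
    #   latitude 39.74
    #   longitude 105.18
    #   time_zone 105
    #   site_elevation 1829.0
    #   weather_data_file_units 1
    #   1 1 0.5 0 0     <- month day hour direct_normal diffuse_horizontal
    #   1 1 1.5 0 0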
@classmethod
def from_daysim_file(cls, wea_file, timestep=1, is_leap_year=False):
"""Create Wea object from a .wea file produced by DAYSIM.
Note that this method is only required when the .wea file generated from
DAYSIM has a timestep greater than 1, which results in the file using
times of day greater than 23:59. DAYSIM wea's with a timestep of 1 can
use the from_file method without issues.
Args:
            wea_file: Full path to .wea file.
timestep: An optional integer to set the number of time steps per hour.
Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are for a leap
year. (Default: False).
"""
# parse in the data
assert os.path.isfile(wea_file), 'Failed to find {}'.format(wea_file)
with open(wea_file, readmode) as weaf:
location = cls._parse_wea_header(weaf, wea_file)
# parse irradiance values
dir_norm_irr = []
dif_horiz_irr = []
for line in weaf:
dirn, difh = [int(v) for v in line.split()[-2:]]
dir_norm_irr.append(dirn)
dif_horiz_irr.append(difh)
# move the last half hour of data to the start of the file
if timestep != 1:
shift = -int(timestep / 2)
dir_norm_irr = dir_norm_irr[shift:] + dir_norm_irr[:shift]
dif_horiz_irr = dif_horiz_irr[shift:] + dif_horiz_irr[:shift]
return cls.from_annual_values(
location, dir_norm_irr, dif_horiz_irr, timestep, is_leap_year)
@classmethod
def from_epw_file(cls, epw_file, timestep=1):
"""Create a wea object using the solar irradiance values in an epw file.
Args:
epw_file: Full path to epw weather file.
timestep: An optional integer to set the number of time steps per hour.
Default is 1 for one value per hour. Note that this input
will only do a linear interpolation over the data in the EPW
file. While such linear interpolations are suitable for most
thermal simulations, where thermal lag "smooths over" the effect
of momentary increases in solar energy, it is not recommended
for daylight simulations, where momentary increases in solar
energy can mean the difference between glare and visual comfort.
"""
epw = EPW(epw_file)
direct_normal, diffuse_horizontal = \
cls._get_data_collections(epw.direct_normal_radiation.values,
epw.diffuse_horizontal_radiation.values,
epw.metadata, 1, epw.is_leap_year)
if timestep != 1:
print("Note: timesteps greater than 1 on epw-generated Wea's \n"
"are suitable for thermal models but are not recommended \n"
"for daylight models.")
# interpolate the data
direct_normal = direct_normal.interpolate_to_timestep(timestep)
diffuse_horizontal = diffuse_horizontal.interpolate_to_timestep(timestep)
# create sunpath to check if the sun is up at a given timestep
sp = Sunpath.from_location(epw.location)
# add correct values to the empty data collection
for i, dt in enumerate(cls._get_datetimes(timestep, epw.is_leap_year)):
# set irradiance values to 0 when the sun is not up
sun = sp.calculate_sun_from_date_time(dt)
if sun.altitude < 0:
direct_normal[i] = 0
diffuse_horizontal[i] = 0
return cls(epw.location, direct_normal, diffuse_horizontal)
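    # Hypothetical usage sketch (the epw path is invented for illustration):
    #   wea = Wea.from_epw_file('./weather/USA_CO_Golden.epw')
    #   wea_4 = Wea.from_epw_file('./weather/USA_CO_Golden.epw', timestep=4)
    # the second call linearly interpolates to 4 values per hour and then
    # zeroes out any interpolated irradiance at timesteps when the sun is down.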
@classmethod
def from_stat_file(cls, statfile, timestep=1, is_leap_year=False):
"""Create an ASHRAE Revised Clear Sky Wea object from data in .stat file.
The .stat file must have monthly sky optical depths within it in order to
create a Wea this way.
Args:
statfile: Full path to the .stat file.
timestep: An optional integer to set the number of time steps per
hour. Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are for a leap
year. (Default: False).
"""
stat = STAT(statfile)
# check to be sure the stat file does not have missing tau values
def check_missing(opt_data, data_name):
if opt_data == []:
raise ValueError('Stat file contains no optical data.')
for i, x in enumerate(opt_data):
if x is None:
raise ValueError(
                        'Missing optical depth data for {} at month {}'.format(
                            data_name, i + 1)
)
check_missing(stat.monthly_tau_beam, 'monthly_tau_beam')
check_missing(stat.monthly_tau_diffuse, 'monthly_tau_diffuse')
return cls.from_ashrae_revised_clear_sky(stat.location, stat.monthly_tau_beam,
stat.monthly_tau_diffuse, timestep,
is_leap_year)
@classmethod
def from_ashrae_revised_clear_sky(cls, location, monthly_tau_beam,
monthly_tau_diffuse, timestep=1,
is_leap_year=False):
"""Create a wea object representing an ASHRAE Revised Clear Sky ("Tau Model")
ASHRAE Revised Clear Skies are intended to determine peak solar load
and sizing parmeters for HVAC systems. The revised clear sky is
currently the default recommended sky model used to autosize HVAC
systems in EnergyPlus. For more information on the ASHRAE Revised Clear
Sky model, see the EnergyPlus Engineering Reference:
https://bigladdersoftware.com/epx/docs/8-9/engineering-reference/climate-calculations.html
Args:
location: Ladybug location object.
monthly_tau_beam: A list of 12 float values indicating the beam
optical depth of the sky at each month of the year.
monthly_tau_diffuse: A list of 12 float values indicating the
diffuse optical depth of the sky at each month of the year.
timestep: An optional integer to set the number of time steps per
hour. Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are for a leap
year. (Default: False).
"""
# extract metadata
metadata = {'source': location.source, 'country': location.country,
'city': location.city}
# create sunpath and get altitude at every timestep of the year
sp = Sunpath.from_location(location)
sp.is_leap_year = is_leap_year
altitudes = [[] for i in range(12)]
dates = cls._get_datetimes(timestep, is_leap_year)
for t_date in dates:
sun = sp.calculate_sun_from_date_time(t_date)
altitudes[sun.datetime.month - 1].append(sun.altitude)
# run all of the months through the ashrae_revised_clear_sky model
direct_norm, diffuse_horiz = [], []
for i_mon, alt_list in enumerate(altitudes):
dir_norm_rad, dif_horiz_rad = ashrae_revised_clear_sky(
alt_list, monthly_tau_beam[i_mon], monthly_tau_diffuse[i_mon])
direct_norm.extend(dir_norm_rad)
diffuse_horiz.extend(dif_horiz_rad)
direct_norm_rad, diffuse_horiz_rad = \
cls._get_data_collections(direct_norm, diffuse_horiz,
metadata, timestep, is_leap_year)
return cls(location, direct_norm_rad, diffuse_horiz_rad)
@classmethod
def from_ashrae_clear_sky(cls, location, sky_clearness=1, timestep=1,
is_leap_year=False):
"""Create a wea object representing an original ASHRAE Clear Sky.
The original ASHRAE Clear Sky is intended to determine peak solar load
        and sizing parameters for HVAC systems. It is not the sky model
currently recommended by ASHRAE since it usually overestimates the
amount of solar irradiance in comparison to the newer ASHRAE Revised
Clear Sky ("Tau Model"). However, the original model here is still
useful for cases where monthly optical depth values are not known. For
more information on the ASHRAE Clear Sky model, see the EnergyPlus
Engineering Reference:
https://bigladdersoftware.com/epx/docs/8-9/engineering-reference/climate-calculations.html
Args:
location: Ladybug location object.
sky_clearness: A factor that will be multiplied by the output of
the model. This is to help account for locations where clear,
dry skies predominate (e.g., at high elevations) or,
conversely, where hazy and humid conditions are frequent. See
Threlkeld and Jordan (1958) for recommended values. Typical
values range from 0.95 to 1.05 and are usually never more
than 1.2. Default is set to 1.0.
timestep: An optional integer to set the number of time steps per
hour. Default is 1 for one value per hour.
is_leap_year: A boolean to indicate if values are for a leap
year. (Default: False).
"""
# extract metadata
metadata = {'source': location.source, 'country': location.country,
'city': location.city}
# create sunpath and get altitude at every timestep of the year
sp = Sunpath.from_location(location)
sp.is_leap_year = is_leap_year
altitudes = [[] for i in range(12)]
dates = cls._get_datetimes(timestep, is_leap_year)
for t_date in dates:
sun = sp.calculate_sun_from_date_time(t_date)
altitudes[sun.datetime.month - 1].append(sun.altitude)
# compute hourly direct normal and diffuse horizontal irradiance
direct_norm, diffuse_horiz = [], []
for i_mon, alt_list in enumerate(altitudes):
dir_norm_rad, dif_horiz_rad = ashrae_clear_sky(
alt_list, i_mon + 1, sky_clearness)
direct_norm.extend(dir_norm_rad)
diffuse_horiz.extend(dif_horiz_rad)
direct_norm_rad, diffuse_horiz_rad = \
cls._get_data_collections(direct_norm, diffuse_horiz,
metadata, timestep, is_leap_year)
return cls(location, direct_norm_rad, diffuse_horiz_rad)
@classmethod
def from_zhang_huang_solar(cls, location, cloud_cover, relative_humidity,
dry_bulb_temperature, wind_speed,
atmospheric_pressure=None, use_disc=False):
"""Create a Wea object from climate data using the Zhang-Huang model.
The Zhang-Huang solar model was developed to estimate solar
irradiance for weather stations that lack such values, which are
typically colleted with a pyranometer. Using total cloud cover,
dry-bulb temperature, relative humidity, and wind speed as
inputs the Zhang-Huang estimates global horizontal irradiance
by means of a regression model across these variables.
For more information on the Zhang-Huang model, see the
EnergyPlus Engineering Reference:
https://bigladdersoftware.com/epx/docs/8-7/engineering-reference/climate-calculations.html#zhang-huang-solar-model
Args:
location: Ladybug location object.
            cloud_cover: An hourly continuous data collection with values for the
                fraction of the sky dome covered in clouds (0 = clear;
                1 = completely overcast).
            relative_humidity: An hourly continuous data collection with values for
                the relative humidity in percent.
            dry_bulb_temperature: An hourly continuous data collection with values
                for the dry bulb temperature in degrees Celsius.
            wind_speed: An hourly continuous data collection with values for the
                wind speed in meters per second.
atmospheric_pressure: An optional hourly continuous data collection
with values for the atmospheric pressure in Pa. If None, pressure
at sea level will be used (101325 Pa). (Default: None)
            use_disc: Boolean to note whether to use the original DISC model as
                opposed to the newer and more accurate DIRINT model. (Default: False).
"""
# Check that input collections are of the right type and aligned to each other
colls = (cloud_cover, relative_humidity, dry_bulb_temperature, wind_speed)
for coll in colls:
assert isinstance(coll, HourlyContinuousCollection), 'Input data for Zhang' \
                '-Huang Wea must be an hourly continuous collection. Got {}.'.format(type(coll))
assert cloud_cover.are_collections_aligned(colls), 'Zhang-Huang Wea input ' \
'data collections must be aligned with one another.'
# check atmospheric_pressure input and generate default if None
if atmospheric_pressure is not None:
assert cloud_cover.is_collection_aligned(atmospheric_pressure), \
                'length of atmospheric_pressure must match the other input collections.'
atm_pressure = atmospheric_pressure.values
else:
atm_pressure = [101325] * len(cloud_cover)
# initiate sunpath based on location
sp = Sunpath.from_location(location)
sp.is_leap_year = cloud_cover.header.analysis_period.is_leap_year
a_per = cloud_cover.header.analysis_period
# calculate parameters needed for zhang-huang irradiance
date_times = []
altitudes = []
doys = []
dry_bulb_t3_hrs = []
for count, t_date in enumerate(cloud_cover.datetimes):
date_times.append(t_date)
sun = sp.calculate_sun_from_date_time(t_date)
altitudes.append(sun.altitude)
doys.append(sun.datetime.doy)
dry_bulb_t3_hrs.append(dry_bulb_temperature[count - (3 * a_per.timestep)])
# calculate zhang-huang irradiance
dir_ir, diff_ir = zhang_huang_solar_split(
altitudes, doys, cloud_cover.values, relative_humidity.values,
dry_bulb_temperature.values, dry_bulb_t3_hrs, wind_speed.values,
atm_pressure, use_disc)
# assemble the results into DataCollections
metadata = {'source': location.source, 'country': location.country,
'city': location.city}
dni_head = Header(DirectNormalIrradiance(), 'W/m2', a_per, metadata)
dhi_head = Header(DiffuseHorizontalIrradiance(), 'W/m2', a_per, metadata)
dni = HourlyContinuousCollection(dni_head, dir_ir)
dhi = HourlyContinuousCollection(dhi_head, diff_ir)
return cls(location, dni, dhi)
@property
def enforce_on_hour(self):
"""Get or set a boolean for whether datetimes occur on the hour.
By default, datetimes will be on the half-hour whenever the Wea has a
timestep of 1, which aligns best with epw data. Setting this property
to True will force the datetimes to be on the hour. Note that this
property has no effect when the Wea timestep is not 1.
"""
return self._enforce_on_hour
@enforce_on_hour.setter
def enforce_on_hour(self, value):
self._enforce_on_hour = bool(value)
@property
def datetimes(self):
"""Get the datetimes in the Wea as a tuple of datetimes."""
if self.timestep == 1 and not self._enforce_on_hour:
return tuple(dt.add_minute(30) for dt in
self.direct_normal_irradiance.datetimes)
else:
return self.direct_normal_irradiance.datetimes
@property
def hoys(self):
"""Get the hours of the year in Wea as a tuple of floats."""
return tuple(dt.hoy for dt in self.datetimes)
@property
def analysis_period(self):
"""Get an AnalysisPeriod for the Wea data."""
return self._direct_normal_irradiance.header.analysis_period
@property
def timestep(self):
"""Get the timesteps per hour of the Wea as an integer."""
return self._timestep
@property
def is_leap_year(self):
"""Get a boolean for whether the irradiance data is for a leap year."""
return self._is_leap_year
@property
def is_continuous(self):
"""Get a boolean for whether the irradiance data is continuous."""
return isinstance(self._direct_normal_irradiance, HourlyContinuousCollection)
@property
def is_annual(self):
"""Get a boolean for whether the irradiance data is for an entire year."""
return self.is_continuous and self.analysis_period.is_annual
@property
def header(self):
"""Get the Wea header as a string."""
return "place %s\n" % self.location.city + \
"latitude %.2f\n" % self.location.latitude + \
"longitude %.2f\n" % -self.location.longitude + \
"time_zone %d\n" % (-self.location.time_zone * 15) + \
"site_elevation %.1f\n" % self.location.elevation + \
"weather_data_file_units 1\n"
@property
def location(self):
"""Get or set a Ladybug Location object for the Wea."""
return self._location
@location.setter
def location(self, value):
assert isinstance(value, Location), \
'Wea.location data must be a Ladybug Location. Got {}'.format(type(value))
self._location = value
@property
def direct_normal_irradiance(self):
"""Get or set a hourly data collection for the direct normal irradiance."""
return self._direct_normal_irradiance
@direct_normal_irradiance.setter
def direct_normal_irradiance(self, data):
acceptable_colls = (HourlyContinuousCollection, HourlyDiscontinuousCollection)
assert isinstance(data, acceptable_colls), 'Input irradiance data for ' \
'Wea must be an hourly data collection. Got {}.'.format(type(data))
assert data.is_collection_aligned(self.diffuse_horizontal_irradiance), \
'Wea direct normal and diffuse horizontal ' \
'irradiance collections must be aligned with one another.'
assert isinstance(data.header.data_type, DirectNormalIrradiance), \
'direct_normal_irradiance data type must be' \
'DirectNormalIrradiance. Got {}'.format(type(data.header.data_type))
self._direct_normal_irradiance = data
@property
def diffuse_horizontal_irradiance(self):
"""Get or set a hourly data collection for the diffuse horizontal irradiance."""
return self._diffuse_horizontal_irradiance
@diffuse_horizontal_irradiance.setter
def diffuse_horizontal_irradiance(self, data):
acceptable_colls = (HourlyContinuousCollection, HourlyDiscontinuousCollection)
assert isinstance(data, acceptable_colls), 'Input irradiance data for ' \
'Wea must be an hourly data collection. Got {}.'.format(type(data))
assert data.is_collection_aligned(self.direct_normal_irradiance), \
'Wea direct normal and diffuse horizontal ' \
'irradiance collections must be aligned with one another.'
assert isinstance(data.header.data_type, DiffuseHorizontalIrradiance), \
'direct_normal_irradiance data type must be' \
'DiffuseHorizontalIrradiance. Got {}'.format(type(data.header.data_type))
self._diffuse_horizontal_irradiance = data
@property
def global_horizontal_irradiance(self):
"""Get a data collection for the global horizontal irradiance."""
header_ghr = Header(data_type=GlobalHorizontalIrradiance(),
unit='W/m2',
analysis_period=self.analysis_period,
metadata=self.metadata)
glob_horiz = []
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
for dt, dnr, dhr in zip(self.datetimes, self.direct_normal_irradiance,
self.diffuse_horizontal_irradiance):
sun = sp.calculate_sun_from_date_time(dt)
glob_horiz.append(dhr + dnr * math.sin(math.radians(sun.altitude)))
return self._aligned_collection(header_ghr, glob_horiz)
@property
def direct_horizontal_irradiance(self):
"""Get a data collection for the direct irradiance on a horizontal surface.
Note that this is different from the direct_normal_irradiance needed
to construct a Wea, which is NORMAL and not HORIZONTAL.
"""
header_dhr = Header(data_type=DirectHorizontalIrradiance(),
unit='W/m2',
analysis_period=self.analysis_period,
metadata=self.metadata)
direct_horiz = []
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
for dt, dnr in zip(self.datetimes, self.direct_normal_irradiance):
sun = sp.calculate_sun_from_date_time(dt)
direct_horiz.append(dnr * math.sin(math.radians(sun.altitude)))
return self._aligned_collection(header_dhr, direct_horiz)
def filter_by_pattern(self, pattern):
"""Create a new filtered Wea from this Wea using a list of booleans.
Args:
pattern: An array of True/False values. This array should usually
have a length matching the number of irradiance values in the Wea
but it can also be a pattern to be repeated over the data.
Returns:
A new Wea filtered by the analysis period.
"""
return Wea(
self.location,
self.direct_normal_irradiance.filter_by_pattern(pattern),
self.diffuse_horizontal_irradiance.filter_by_pattern(pattern))
def filter_by_analysis_period(self, analysis_period):
"""Create a new filtered Wea from this Wea based on an analysis period.
Args:
analysis period: A Ladybug analysis period.
Returns:
A new Wea filtered by the analysis period.
"""
return Wea(
self.location,
self.direct_normal_irradiance.filter_by_analysis_period(analysis_period),
self.diffuse_horizontal_irradiance.filter_by_analysis_period(analysis_period))
def filter_by_hoys(self, hoys):
"""Create a new filtered Wea from this Wea using a list of hours of the year.
Args:
hoys: A List of hours of the year 0..8759.
Returns:
A new Wea with filtered data.
"""
return Wea(
self.location,
self.direct_normal_irradiance.filter_by_hoys(hoys),
self.diffuse_horizontal_irradiance.filter_by_hoys(hoys))
def filter_by_moys(self, moys):
"""Create a new filtered Wea from this Wea based on a list of minutes of the year.
Args:
moys: A List of minutes of the year [0..8759 * 60].
Returns:
A new Wea with filtered data.
"""
return Wea(
self.location,
self.direct_normal_irradiance.filter_by_moys(moys),
self.diffuse_horizontal_irradiance.filter_by_moys(moys))
def filter_by_sun_up(self, min_altitude=0):
"""Create a new filtered Wea from this Wea based on whether the sun is up
Args:
min_altitude: A number for the minimum altitude above the horizon at
which the sun is considered up in degrees. Setting this to 0 will
filter values for all hours where the sun is physically above the
horizon. By setting this to a negative number (eg. -6), various levels
of twilight can be used to filter the data (eg. civil twilight).
Positive numbers can be used to discount low sun angles (Default: 0).
Returns:
A new Wea with filtered data.
"""
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
pattern = []
for dt in self.datetimes:
sun = sp.calculate_sun_from_date_time(dt)
            sun_up = sun.altitude > min_altitude
pattern.append(sun_up)
return self.filter_by_pattern(pattern)
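    # Hedged usage sketch: keep only daylit timesteps, or include civil
    # twilight by allowing sun altitudes down to -6 degrees:
    #   daytime_wea = wea.filter_by_sun_up()
    #   twilight_wea = wea.filter_by_sun_up(min_altitude=-6)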
def get_irradiance_value(self, month, day, hour):
"""Get direct and diffuse irradiance values for a point in time.
Args:
month: Integer for month of the year [1 - 12].
day: Integer for the day of the month [1 - 31].
hour: Float for hour of the day [0 - 23].
"""
dt = DateTime(month, day, hour, leap_year=self.is_leap_year)
try:
count = int(dt.hoy * self.timestep) if self.is_annual else \
self.direct_normal_irradiance.datetimes.index(dt)
except ValueError as e:
raise ValueError('Datetime {} was not found in the Wea.\n{}'.format(dt, e))
return self.direct_normal_irradiance[count], \
self.diffuse_horizontal_irradiance[count]
def get_irradiance_value_for_hoy(self, hoy):
"""Get direct and diffuse irradiance values for a hoy.
Args:
hoy: Float for hour of the year [0 - 8759].
"""
try:
count = int(hoy * self.timestep) if self.is_annual else \
self.direct_normal_irradiance.datetimes.index(DateTime.from_hoy(hoy))
except ValueError as e:
raise ValueError('HOY {} was not found in the Wea.\n{}'.format(hoy, e))
return self.direct_normal_irradiance[count], \
self.diffuse_horizontal_irradiance[count]
def directional_irradiance(self, altitude=90, azimuth=180,
ground_reflectance=0.2, isotropic=True):
"""Get the irradiance components for a surface facing a given direction.
Note this method computes unobstructed solar flux facing a given
altitude and azimuth. The default is set to return the global horizontal
irradiance, assuming an altitude facing straight up (90 degrees).
Args:
altitude: A number between -90 and 90 that represents the
altitude at which irradiance is being evaluated in degrees.
azimuth: A number between 0 and 360 that represents the
azimuth at which irradiance is being evaluated in degrees.
ground_reflectance: A number between 0 and 1 that represents the
reflectance of the ground. Default is set to 0.2. Some
common ground reflectances are:
* urban: 0.18
* grass: 0.20
* fresh grass: 0.26
* soil: 0.17
* sand: 0.40
* snow: 0.65
* fresh_snow: 0.75
* asphalt: 0.12
* concrete: 0.30
* sea: 0.06
isotropic: A boolean value that sets whether an isotropic sky is
used (as opposed to an anisotropic sky). An isotropic sky
assumes an even distribution of diffuse irradiance across the
sky while an anisotropic sky places more diffuse irradiance
near the solar disc. (Default: True).
Returns:
A tuple of four elements
- total_irradiance: A data collection of total solar irradiance.
- direct_irradiance: A data collection of direct solar irradiance.
- diffuse_irradiance: A data collection of diffuse sky solar irradiance.
- reflected_irradiance: A data collection of ground reflected solar
irradiance.
"""
# function to convert polar coordinates to xyz.
def pol2cart(phi, theta):
mult = math.cos(theta)
x = math.sin(phi) * mult
y = math.cos(phi) * mult
z = math.sin(theta)
return Vector3D(x, y, z)
# convert the altitude and azimuth to a normal vector
normal = pol2cart(math.radians(azimuth), math.radians(altitude))
# create sunpath and get altitude at every timestep of the year
dir_irr, diff_irr, ref_irr, total_irr = [], [], [], []
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
for dt, dnr, dhr in zip(self.datetimes, self.direct_normal_irradiance,
self.diffuse_horizontal_irradiance):
sun = sp.calculate_sun_from_date_time(dt)
sun_vec = pol2cart(math.radians(sun.azimuth),
math.radians(sun.altitude))
vec_angle = sun_vec.angle(normal)
# direct irradiance on surface
srf_dir = 0
if sun.altitude > 0 and vec_angle < math.pi / 2:
srf_dir = dnr * math.cos(vec_angle)
# diffuse irradiance on surface
if isotropic:
srf_dif = dhr * ((math.sin(math.radians(altitude)) / 2) + 0.5)
else:
# Y ratio of the anisotropic sky model:
# Y = 0.55 + 0.437 * cos(angle) + 0.313 * cos(angle) ** 2
y = max(0.45, 0.55 + (0.437 * math.cos(vec_angle)) +
0.313 * math.cos(vec_angle) * math.cos(vec_angle))
srf_dif = dhr * (y * (
math.sin(math.radians(abs(90 - altitude)))) +
math.cos(math.radians(abs(90 - altitude))))
# reflected irradiance on surface.
e_glob = dhr + dnr * math.cos(math.radians(90 - sun.altitude))
srf_ref = e_glob * ground_reflectance * (0.5 - (math.sin(
math.radians(altitude)) / 2))
# add it all together
dir_irr.append(srf_dir)
diff_irr.append(srf_dif)
ref_irr.append(srf_ref)
total_irr.append(srf_dir + srf_dif + srf_ref)
# create the headers
data_head = Header(Irradiance(), 'W/m2', self.analysis_period, self.metadata)
# create the data collections
direct_irradiance = self._aligned_collection(data_head, dir_irr)
diffuse_irradiance = self._aligned_collection(data_head, diff_irr)
reflected_irradiance = self._aligned_collection(data_head, ref_irr)
total_irradiance = self._aligned_collection(data_head, total_irr)
return total_irradiance, direct_irradiance, \
diffuse_irradiance, reflected_irradiance
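# Illustrative sketch (assumes an existing Wea `wea`): irradiance on a
# vertical, south-facing surface (altitude=0, azimuth=180) with the default
# ground reflectance and isotropic sky.
#
#     total, direct, diffuse, reflected = wea.directional_irradiance(
#         altitude=0, azimuth=180)
#     peak = max(total.values)  # peak total W/m2 on the south wall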
def estimate_illuminance_components(self, dew_point):
"""Get estimated direct, diffuse, and global illuminance from this Wea.
Note that this method should only be used when there are no measured
illuminance values that correspond to this Wea's irradiance values.
Because the illuminance components calculated here are simply estimated
using a model by Perez [1], they are not as accurate as true measured values.
Note:
[1] Perez R. (1990). 'Modeling Daylight Availability and Irradiance
Components from Direct and Global Irradiance'. Solar Energy.
Vol. 44. No. 5, pp. 271-289. USA.
Args:
dew_point: A data collection of dewpoint temperature in degrees C. This
data collection must align with the irradiance data on this object.
Returns:
A tuple with four elements
- global_horiz_ill: Data collection of Global Horizontal Illuminance
in lux.
- direct_normal_ill: Data collection of Direct Normal Illuminance in lux.
- diffuse_horizontal_ill: Data collection of Diffuse Horizontal
Illuminance in lux.
- zenith_lum: Data collection of Zenith Luminance in lux.
"""
# check the dew_point input
assert dew_point.is_collection_aligned(self.direct_normal_irradiance), \
'Input dew_point data must be aligned with the irradiance on the Wea.'
# calculate illuminance values
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
gh_ill_values, dn_ill_values, dh_ill_values, zen_lum_values = [], [], [], []
for dt, dp, ghi, dni, dhi in zip(
self.datetimes, dew_point, self.global_horizontal_irradiance,
self.direct_normal_irradiance, self.diffuse_horizontal_irradiance):
alt = sp.calculate_sun_from_date_time(dt).altitude
gh, dn, dh, z = estimate_illuminance_from_irradiance(alt, ghi, dni, dhi, dp)
gh_ill_values.append(gh)
dn_ill_values.append(dn)
dh_ill_values.append(dh)
zen_lum_values.append(z)
# create data collection headers for the results
gh_ill_head = Header(GlobalHorizontalIlluminance(), 'lux',
self.analysis_period, self.metadata)
dn_ill_head = Header(DirectNormalIlluminance(), 'lux',
self.analysis_period, self.metadata)
dh_ill_head = Header(DiffuseHorizontalIlluminance(), 'lux',
self.analysis_period, self.metadata)
zen_lum_head = Header(ZenithLuminance(), 'cd/m2',
self.analysis_period, self.metadata)
# create data collections to hold illuminance results
global_horiz_ill = self._aligned_collection(gh_ill_head, gh_ill_values)
direct_normal_ill = self._aligned_collection(dn_ill_head, dn_ill_values)
diffuse_horizontal_ill = self._aligned_collection(dh_ill_head, dh_ill_values)
zenith_lum = self._aligned_collection(zen_lum_head, zen_lum_values)
return global_horiz_ill, direct_normal_ill, diffuse_horizontal_ill, zenith_lum
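# Illustrative sketch: the dew point collection must align with this Wea's
# irradiance data; here it is assumed to come from an EPW of the same
# location and timestep (dew_point_temperature per the Ladybug EPW API).
#
#     dew_pt = epw.dew_point_temperature
#     gh_ill, dn_ill, dh_ill, zen_lum = \
#         wea.estimate_illuminance_components(dew_pt)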
def to_dict(self):
"""Get the Wea as a dictionary."""
base = {
'type': 'Wea',
'location': self.location.to_dict(),
'direct_normal_irradiance': self.direct_normal_irradiance.values,
'diffuse_horizontal_irradiance': self.diffuse_horizontal_irradiance.values,
'timestep': self.timestep,
'is_leap_year': self.is_leap_year
}
if not self.is_annual:
dts = self.direct_normal_irradiance.datetimes
base['datetimes'] = [dat.to_array() for dat in dts]
return base
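# Illustrative sketch (assumes an existing Wea `wea` and the corresponding
# Wea.from_dict classmethod defined earlier in this module):
#
#     rebuilt = Wea.from_dict(wea.to_dict())
#     assert rebuilt == wea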
def to_file_string(self):
"""Get a text string for the entirety of the Wea file contents."""
lines = [self.header]
for dir_rad, dif_rad, dt in zip(self.direct_normal_irradiance,
self.diffuse_horizontal_irradiance,
self.datetimes):
line = "%d %d %.3f %d %d\n" \
% (dt.month, dt.day, dt.float_hour, dir_rad, dif_rad)
lines.append(line)
return ''.join(lines)
def write(self, file_path, write_hours=False):
"""Write the Wea object to a .wea file and return the file path.
Args:
file_path: Text string for the path to where the .wea file should be written.
write_hours: Boolean to note whether a .hrs file should be written
next to the .wea file, which lists the hours of the year (hoys)
contained within the .wea file.
"""
# write the .wea file
if not file_path.lower().endswith('.wea'):
file_path += '.wea'
file_data = self.to_file_string()
write_to_file(file_path, file_data, True)
# write the .hrs file if requested
if write_hours:
hrs_file_path = file_path[:-4] + '.hrs'
hrs_data = ','.join(str(h) for h in self.hoys) + '\n'
write_to_file(hrs_file_path, hrs_data, True)
return file_path
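# Illustrative sketch (assumes an existing Wea `wea`; the path is
# hypothetical): write the .wea file plus a companion .hrs file listing
# the hours of the year it contains.
#
#     out = wea.write('./sim/boston.wea', write_hours=True)
#     # -> ./sim/boston.wea and ./sim/boston.hrs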
def duplicate(self):
"""Duplicate location."""
return self.__copy__()
@staticmethod
def to_constant_value(wea_file, value=1000):
"""Convert a Wea file to have a constant value for each datetime.
This is useful in workflows where hourly irradiance values are inconsequential
to the analysis and one is only using the Wea as a format to pass location
and datetime information (eg. for direct sun hours).
Args:
wea_file: Full path to .wea file.
value: The direct and diffuse irradiance value that will be written
in for all datetimes of the Wea.
Returns:
Text string of Wea file contents with all irradiance values replaced
with the input value.
"""
assert os.path.isfile(wea_file), 'Failed to find {}'.format(wea_file)
new_lines, value = [], str(int(value))
with open(wea_file, readmode) as weaf:
for i in range(6):
new_lines.append(weaf.readline())
for line in weaf:
vals = line.split()
vals[-2], vals[-1] = value, value
new_lines.append(' '.join(vals) + '\n')
return ''.join(new_lines)
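# Illustrative sketch (file paths are hypothetical): replace every irradiance
# value in an existing .wea file with a constant 1000 W/m2, e.g. for a direct
# sun hours study where the magnitudes do not matter.
#
#     contents = Wea.to_constant_value('./weather/boston.wea', value=1000)
#     with open('./weather/boston_const.wea', 'w') as f:
#         f.write(contents)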
def _aligned_collection(self, header, values):
"""Process a header and values into a collection aligned with Wea data."""
if self.is_continuous:
return HourlyContinuousCollection(header, values)
else:
dts = self.direct_normal_irradiance.datetimes
return HourlyDiscontinuousCollection(header, values, dts)
@staticmethod
def _get_datetimes(timestep, is_leap_year):
"""Get a list of annual datetimes based on timestep.
This method is intended for use within classmethods. To get datetimes
from an existing Wea, use the datetimes or hoys properties.
"""
hour_count = 8760 + 24 if is_leap_year else 8760
adjust_time = 30 if timestep == 1 else 0
return tuple(
DateTime.from_moy(60.0 * count / timestep + adjust_time, is_leap_year)
for count in xrange(hour_count * timestep)
)
@staticmethod
def _get_data_collections(dnr_values, dhr_values, metadata, timestep, is_leap_year):
"""Return two annual data collections for Direct Normal, Diffuse Horizontal."""
analysis_period = AnalysisPeriod(timestep=timestep, is_leap_year=is_leap_year)
dnr_header = Header(data_type=DirectNormalIrradiance(),
unit='W/m2',
analysis_period=analysis_period,
metadata=metadata)
direct_norm_rad = HourlyContinuousCollection(dnr_header, dnr_values)
dhr_header = Header(data_type=DiffuseHorizontalIrradiance(),
unit='W/m2',
analysis_period=analysis_period,
metadata=metadata)
diffuse_horiz_rad = HourlyContinuousCollection(dhr_header, dhr_values)
return direct_norm_rad, diffuse_horiz_rad
@staticmethod
def _parse_wea_header(weaf, wea_file_name):
"""Parse the Ladybug location from a wea header given the wea file object."""
first_line = weaf.readline()
assert first_line.startswith('place'), 'Failed to find place in .wea header.\n' \
'{} is not a valid wea file.'.format(wea_file_name)
location = Location()
location.city = ' '.join(first_line.split()[1:])
location.latitude = float(weaf.readline().split()[-1])
location.longitude = -float(weaf.readline().split()[-1])
location.time_zone = -int(weaf.readline().split()[-1]) / 15
location.elevation = float(weaf.readline().split()[-1])
weaf.readline() # pass line for weather data units
return location
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def __len__(self):
return len(self.direct_normal_irradiance)
def __getitem__(self, key):
return self.direct_normal_irradiance[key], self.diffuse_horizontal_irradiance[key]
def __iter__(self):
return zip(self.direct_normal_irradiance.values,
self.diffuse_horizontal_irradiance.values)
def __key(self):
return self.location, self.direct_normal_irradiance, \
self.diffuse_horizontal_irradiance
def __eq__(self, other):
return isinstance(other, Wea) and self.__key() == other.__key()
def __ne__(self, value):
return not self.__eq__(value)
def __copy__(self):
new_wea = Wea(
self.location.duplicate(),
self.direct_normal_irradiance.duplicate(),
self.diffuse_horizontal_irradiance.duplicate()
)
new_wea._enforce_on_hour = self._enforce_on_hour
new_wea.metadata = deepcopy(self.metadata)
return new_wea
def __repr__(self):
"""Wea object representation."""
return "WEA [%s]" % self.location.city
|
ladybug-analysis-tools/ladybug-core
|
ladybug/wea.py
|
Python
|
gpl-3.0
| 52,788
|
[
"EPW"
] |
fb7e41567da2ab1d4fe6e076f617803986580ac631621dd42d51ce25aa158ff9
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import uuid
import mooseutils
import moosetree
import collections
import MooseDocs
from .. import common
from ..common import exceptions, report_error
from ..base import components
from ..tree import tokens, html, latex
from . import core, command
def make_extension(**kwargs):
return ModalExtension(**kwargs)
ModalLink = tokens.newToken('ModalLink', content=None, title=None)
ModalSourceLink = tokens.newToken('ModalSourceLink', src=None, title=None, language=None)
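# Illustrative usage sketch (not part of the original module): these tokens
# are created by other extensions rather than by a markdown command, e.g.
# attaching a source-file link token to an existing parent token (the path
# below is hypothetical):
#
#     ModalSourceLink(parent, src='path/to/simple_diffusion.i', language='text')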
class ModalExtension(command.CommandExtension):
"""
Adds ability to create links to complete source files.
This extension does not add any commands; it provides a token to be created by other packages.
It was created to have a single control point for toggling display of complete source.
"""
@staticmethod
def defaultConfig():
config = command.CommandExtension.defaultConfig()
config['show_source'] = (True, "Toggle the display of complete source files.")
return config
def __init__(self, *args, **kwargs):
command.CommandExtension.__init__(self, *args, **kwargs)
# storage to allow all modal content to be added to end of content
self.__modals = collections.defaultdict(list)
def addModal(self, uid, div):
self.__modals[uid].append(div)
def extend(self, reader, renderer):
renderer.add('ModalLink', RenderModalLinkToken())
renderer.add('ModalSourceLink', RenderSourceLinkToken())
def postRender(self, page, results):
parent = moosetree.find(results.root, lambda n: n.name == 'div' and 'moose-content' in n['class'])
for div in self.__modals.get(page.uid, list()):
div.parent = parent
class RenderModalLinkToken(components.RenderComponent):
def createHTML(self, parent, token, page):
# Must have children, otherwise nothing exists to click
if not token.children:
msg = report_error("The 'ModalLink' token requires children.", page.source,
token.info.line if token.info else None,
token.info[0] if token.info else token.text())
raise exceptions.MooseDocsException(msg)
return html.Tag(parent, 'span', class_='moose-modal-link')
def createMaterialize(self, parent, token, page):
# Must have children, otherwise nothing exists to click
if not token.children:
msg = report_error("The 'ModalLink' token requires children.", page.source,
token.info.line if token.info else None,
token.info[0] if token.info else token.text())
raise exceptions.MooseDocsException(msg)
# Check 'content' is correctly provided
content = token['content']
if isinstance(content, str):
content = core.Paragraph(None, string=content)
elif not isinstance(content, tokens.Token):
msg = report_error("The 'ModalLink' token 'content' attribute must be a string or Token.", page.source,
token.info.line if token.info else None,
token.info[0] if token.info else token.text())
raise exceptions.MooseDocsException(msg)
# Create the <div> for the modal content
uid = uuid.uuid4()
modal_div = html.Tag(None, 'div', class_='moose-modal modal', id_=str(uid))
modal_content = html.Tag(modal_div, 'div', class_="modal-content")
# Title
if token['title']:
h = core.Heading(None, level=4, string=token['title'])
self.renderer.render(modal_content, h, page)
# Content
self.renderer.render(modal_content, content, page)
self.extension.addModal(page.uid, modal_div)
# Return link to modal window
return html.Tag(parent, 'a', href='#{}'.format(uid), class_='moose-modal-link modal-trigger')
def createLatex(self, parent, token, page):
return parent
class RenderSourceLinkToken(components.RenderComponent):
def createHTML(self, parent, token, page):
string = '({})'.format(os.path.relpath(token['src'], MooseDocs.ROOT_DIR)) if not token.children else None
return html.Tag(parent, 'span', string=string, class_='moose-source-filename')
def createMaterialize(self, parent, token, page):
fullpath = token['src']
text = '({})'.format(os.path.relpath(token['src'], MooseDocs.ROOT_DIR))
string = text if not token.children else None
a = html.Tag(parent, 'span', string=string, class_='moose-source-filename tooltipped')
# This should remain an Extension option, so it can be disabled universally
if self.extension['show_source']:
# Create the <div> for the modal content
uid = uuid.uuid4()
modal_div = html.Tag(None, 'div', class_='moose-modal modal', id_=str(uid))
modal_content = html.Tag(modal_div, 'div', class_="modal-content")
self.extension.addModal(page.uid, modal_div)
# Add the title and update the span to be the <a> trigger
html.Tag(modal_content, 'h4', string=token['title'] or text)
a.name = 'a'
a['href'] = '#{}'.format(uid)
a.addClass('modal-trigger')
footer = html.Tag(modal_div, 'div', class_='modal-footer')
html.Tag(footer, 'a', class_='modal-close btn-flat', string='Close')
source = common.project_find(fullpath)
if len(source) > 1:
options = mooseutils.levenshteinDistance(fullpath, source, number=8)
msg = "Multiple files match the supplied filename {}:\n".format(fullpath)
for opt in options:
msg += " {}\n".format(opt)
msg = report_error(msg, page.source,
token.info.line if token.info else None,
token.info[0] if token.info else token.text())
raise exceptions.MooseDocsException(msg)
elif len(source) == 0:
msg = "Unable to locate file that matches the supplied filename {}\n".format(fullpath)
msg = report_error(msg, page.source,
token.info.line if token.info else None,
token.info[0] if token.info else token.text())
raise exceptions.MooseDocsException(msg)
content = common.fix_moose_header(common.read(source[0]))
language = token['language'] or common.get_language(source[0])
code = core.Code(None, language=language, content=content)
self.renderer.render(modal_content, code, page)
return a
def createLatex(self, parent, token, page):
if not token.children:
latex.String(parent, content='({})'.format(os.path.relpath(token['src'], MooseDocs.ROOT_DIR)))
return parent
|
harterj/moose
|
python/MooseDocs/extensions/modal.py
|
Python
|
lgpl-2.1
| 7,311
|
[
"MOOSE"
] |
ecb2612774b9d73a3ee058caa370ccabef0e48a155c125eb55e9a1c48f96ac8f
|
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return [status, (y-model)/err]
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if (fjac != None):
pderiv = Numeric.zeros([len(x), len(p)], Numeric.Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return [status, (y-model)/err, pderiv]
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives, the parameter FJAC is passed as None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo[2]['tied'] = '2 * p[1]'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)] # independent dicts; [{...}]*5 would share one dict
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
EXAMPLE
import mpfit
import Numeric
x = Numeric.arange(100, typecode=Numeric.Float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*(x**2) + p0[3]*Numeric.sqrt(x) +
p0[4]*Numeric.log(x) )
err = Numeric.ones(x.shape, Numeric.Float) # assumed 1-sigma uncertainties
fa = {'x':x, 'y':y, 'err':err}
m = mpfit(myfunct, p0, functkw=fa) # pass the function object defined above
print 'status = ', m.status
if (m.status <= 0): print 'error message = ', m.errmsg
print 'parameters = ', m.params
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be Taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
a symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
"""
import Numeric
import types
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
fastnorm=0, rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
fastnorm:
Set this keyword to select a faster algorithm to compute sum-of-square
values internally. For systems with large numbers of data points, the
standard algorithm can become prohibitively slow because it cannot be
vectorized well. By setting this keyword, MPFIT will run faster, but
it will be more prone to floating point overflows and underflows. Thus, setting
this keyword may sacrifice some stability in the fitting process.
Default: clear (=0)
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return
a status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine to be called.
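A minimal custom iterfunct might look like this (illustrative
sketch, not part of the original distribution):
    def myiter(myfunct, p, iter, fnorm, functkw=None,
               parinfo=None, quiet=0, dof=None):
        if quiet == 0:
            print 'iter %d  chi-sq = %g' % (iter, fnorm)
        return 0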
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo:
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/Numeric.sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the measured chi-squared value.
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * Numeric.sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.fastnorm = fastnorm
self.nfev = 0
self.damp = damp
self.machar = machar(double=1)
machep = self.machar.machep
if (fcn==None):
self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
return
if (iterfunct == 'default'): iterfunct = self.defiter
## Parameter damping doesn't work when user is providing their own
## gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
## Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall == None) and (parinfo == None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
## Be sure that PARINFO is of the right type
if (parinfo != None):
if (type(parinfo) != types.ListType):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if (type(parinfo[0]) != types.DictionaryType):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall != None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
## If the parameters were not specified at the command line, then
## extract them from PARINFO
if (xall == None):
xall = self.parinfo(parinfo, 'value')
if (xall == None):
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
## Make sure parameters are Numeric arrays of type Numeric.Float
xall = Numeric.asarray(xall, Numeric.Float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
## TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if (ptied[i] != ''): self.qanytied = 1
self.ptied = ptied
## FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') ## Tied parameters are also effectively fixed
## Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
## Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep * 0 ## Remove minstep for now!!
qmax = maxstep != 0
wh = Numeric.nonzero(((qmin!=0.) & (qmax!=0.)) & (maxstep < minstep))
if (len(wh) > 0):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = Numeric.nonzero((qmin!=0.) & (qmax!=0.))
qminmax = len(wh) > 0
## Finish up the free parameters
ifree = Numeric.nonzero(pfixed != 1)
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
## Compose only VARYING parameters
self.params = xall ## self.params is the set of parameters to be returned
x = Numeric.take(self.params, ifree) ## x is the set of free parameters
## LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0])
limits = self.parinfo(parinfo, 'limits', default=[0.,0.])
if (limited != None) and (limits != None):
## Error checking on limits in parinfo
wh = Numeric.nonzero((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1])))
if (len(wh) > 0):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
wh = Numeric.nonzero((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0))
if (len(wh) > 0):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
## Transfer structure values to local variables
qulim = Numeric.take(limited[:,1], ifree)
ulim = Numeric.take(limits [:,1], ifree)
qllim = Numeric.take(limited[:,0], ifree)
llim = Numeric.take(limits [:,0], ifree)
wh = Numeric.nonzero((qulim!=0.) | (qllim!=0.))
if (len(wh) > 0): qanylim = 1
else: qanylim = 0
else:
## Fill in local variables with dummy values
qulim = Numeric.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
## Check input parameters for errors
if ((n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0)
or (maxiter <= 0) or (factor <= 0)):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if (rescale != 0):
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if (len(diag) < n): return
wh = Numeric.nonzero(diag <= 0)
if (len(wh) > 0): return
self.errmsg = ''
# Make sure x is a Numeric array of type Numeric.Float
x = Numeric.asarray(x, Numeric.Float)
[self.status, fvec] = self.call(fcn, self.params, functkw)
if (self.status < 0):
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
m = len(fvec)
if (m < n):
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.fnorm = self.enorm(fvec)
## Initialize Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
## Beginning of the outer loop
while(1):
## If requested, call fcn to enable printing of iterates
Numeric.put(self.params, ifree, x)
if (self.qanytied): self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct != None):
if (((self.niter-1) % nprint) == 0):
mperr = 0
xnew0 = self.params.copy()
dof = max(len(fvec) - len(x), 0)
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if (status != None): self.status = status
## Check for user termination
if (self.status < 0):
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
## If parameters were changed (grrr..) then re-tie
if (max(abs(xnew0-self.params)) > 0):
if (self.qanytied): self.params = self.tie(self.params, ptied)
x = Numeric.take(self.params, ifree)
## Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if (fjac == None):
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
## Determine if any of the parameters are pegged at the limits
if (qanylim):
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = Numeric.nonzero(qllim & (x == llim))
nlpeg = len(whlpeg)
whupeg = Numeric.nonzero(qulim & (x == ulim))
nupeg = len(whupeg)
## See if any "pegged" values should keep their derivatives
if (nlpeg > 0):
## Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum = Numeric.sum(fvec * fjac[:,whlpeg[i]])
if (sum > 0): fjac[:,whlpeg[i]] = 0
if (nupeg > 0):
## Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum = Numeric.sum(fvec * fjac[:,whupeg[i]])
if (sum < 0): fjac[:,whupeg[i]] = 0
## Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
## On the first iteration if "diag" is unspecified, scale
## according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if (self.niter == 1):
if ((rescale==0) or (len(diag) < n)):
diag = wa2.copy()
wh = Numeric.nonzero(diag == 0)
Numeric.put(diag, wh, 1.)
## On the first iteration, calculate the norm of the scaled x
## and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if (delta == 0.): delta = factor
## Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if (temp3 != 0):
fj = fjac[j:,lj]
wj = wa4[j:len(wa4)]
## *** optimization wa4(j:*)
wa4[j:len(wa4)] = wj - fj * Numeric.sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
## From this point on, only the square matrix, consisting of the
## triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
## Check for overflow. This should be a cheap test here since FJAC
## has been reduced to a (small) square matrix, and the test is
## O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
## Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if (self.fnorm != 0):
for j in range(n):
l = ipvt[j]
if (wa2[l] != 0):
sum = Numeric.sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
gnorm = max([gnorm,abs(sum/wa2[l])])
## Test for convergence of the gradient norm
if (gnorm <= gtol):
self.status = 4
return
## Rescale if necessary
if (rescale == 0):
diag = Numeric.choose(diag>wa2, (wa2, diag))
## Beginning of the inner loop
while(1):
## Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
## Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
## No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
## Respect the limits. If a step were to go out of bounds, then
## we should take a step in the same direction but shorter distance.
## The step should take us right to the limit in that case.
alpha = 1.
if (qanylim):
## Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if (nlpeg > 0):
Numeric.put(wa1, whlpeg, Numeric.clip(
Numeric.take(wa1, whlpeg), 0., max(wa1)))
if (nupeg > 0):
Numeric.put(wa1, whupeg, Numeric.clip(
Numeric.take(wa1, whupeg), min(wa1), 0.))
dwa1 = abs(wa1) > machep
whl = Numeric.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim))
if (len(whl) > 0):
t = ((Numeric.take(llim, whl) - Numeric.take(x, whl)) /
Numeric.take(wa1, whl))
alpha = min(alpha, min(t))
whu = Numeric.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim))
if (len(whu) > 0):
t = ((Numeric.take(ulim, whu) - Numeric.take(x, whu)) /
Numeric.take(wa1, whu))
alpha = min(alpha, min(t))
## Obey any max step values.
if (qminmax):
nwa1 = wa1 * alpha
whmax = Numeric.nonzero((qmax != 0.) & (maxstep > 0))
if (len(whmax) > 0):
mrat = max(Numeric.take(nwa1, whmax) /
Numeric.take(maxstep, whmax))
if (mrat > 1): alpha = alpha / mrat
## Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
## Adjust the final output values. If the step put us exactly
## on a boundary, make sure it is exact.
wh = Numeric.nonzero((qulim!=0.) & (wa2 >= ulim*(1-machep)))
if (len(wh) > 0): Numeric.put(wa2, wh, Numeric.take(ulim, wh))
wh = Numeric.nonzero((qllim!=0.) & (wa2 <= llim*(1+machep)))
if (len(wh) > 0): Numeric.put(wa2, wh, Numeric.take(llim, wh))
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
## On the first iteration, adjust the initial step bound
if (self.niter == 1): delta = min([delta,pnorm])
Numeric.put(self.params, ifree, wa2)
## Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if (self.status < 0):
self.errmsg = 'WARNING: premature termination by "'+str(fcn)+'"'
return
fnorm1 = self.enorm(wa4)
## Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if ((0.1 * fnorm1) < self.fnorm): actred = - (fnorm1/self.fnorm)**2 + 1.
## Compute the scaled predicted reduction and the scaled directional
## derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
## Remember, alpha is the fraction of the full LM step actually
## taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (Numeric.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
## Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if (prered != 0): ratio = actred/prered
## Update the step bound
if (ratio <= 0.25):
if (actred >= 0): temp = .5
else: temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1): temp = 0.1
delta = temp*min([delta,pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
## Test for successful iteration
if (ratio >= 0.0001):
## Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
## Tests for convergence
if ((abs(actred) <= ftol) and (prered <= ftol)
and (0.5 * ratio <= 1)): self.status = 1
if delta <= xtol*xnorm: self.status = 2
if ((abs(actred) <= ftol) and (prered <= ftol)
and (0.5 * ratio <= 1) and (self.status == 2)): self.status = 3
if (self.status != 0): break
## Tests for termination and stringent tolerances
if (self.niter >= maxiter): self.status = 5
if ((abs(actred) <= machep) and (prered <= machep)
and (0.5*ratio <= 1)): self.status = 6
if delta <= machep*xnorm: self.status = 7
if gnorm <= machep: self.status = 8
if (self.status != 0): break
## End of inner loop. Repeat if iteration unsuccessful
if (ratio >= 0.0001): break
## Check for over/underflow - SKIP FOR NOW
##wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
##if ct GT 0 OR finite(ratio) EQ 0 then begin
## errmsg = ('ERROR: parameter or function value(s) have become '+$
## 'infinite# check model function for over- '+$
## 'and underflow')
## self.status = -16
## break
if (self.status != 0): break;
## End of outer loop.
catch_msg = 'in the termination phase'
## Termination, either normal or user imposed.
if (len(self.params) == 0):
return
if (nfree == 0): self.params = xall.copy()
else: Numeric.put(self.params, ifree, x)
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if ((self.fnorm != None) and (fnorm1 != None)):
self.fnorm = max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
## (very carefully) set the covariance matrix COVAR
if ((self.status > 0) and (nocovar==0) and (n != None)
and (fjac != None) and (ipvt != None)):
sz = Numeric.shape(fjac)
if ((n > 0) and (sz[0] >= n) and (sz[1] >= n)
and (len(ipvt) >= n)):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
## Fill in actual covariance matrix, accounting for fixed
## parameters.
self.covar = Numeric.zeros([nn, nn], Numeric.Float)
for i in range(n):
indices = ifree+ifree[i]*nn  ## row stride is nn (full parameter count), not n
Numeric.put(self.covar, indices, cv[:,i])
## Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = Numeric.zeros(nn, Numeric.Float)
d = Numeric.diagonal(self.covar)
wh = Numeric.nonzero(d >= 0)
if len(wh) > 0:
Numeric.put(self.perror, wh, Numeric.sqrt(Numeric.take(d, wh)))
return
## Default procedure to be called every iteration. It simply prints
## the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if (self.debug): print 'Entering defiter...'
if (quiet): return
if (fnorm == None):
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
## Determine which parameters to print
nprint = len(x)
print "Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof)
for i in range(nprint):
if (parinfo != None) and (parinfo[i].has_key('parname')):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo != None) and (parinfo[i].has_key('mpprint')):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if (iprint):
print p + (pformat % x[i]) + ' '
return(0)
## DO_ITERSTOP:
## if keyword_set(iterstop) then begin
## k = get_kbrd(0)
## if k EQ string(byte(7)) then begin
## message, 'WARNING: minimization not complete', /info
## print, 'Do you want to terminate this procedure? (y/n)', $
## format='(A,$)'
## k = ''
## read, k
## if strupcase(strmid(k,0,1)) EQ 'Y' then begin
## message, 'WARNING: Procedure is terminating.', /info
## mperr = -1
## endif
## endif
## endif
## Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if (self.debug): print 'Entering parinfo...'
if (n == 0) and (parinfo != None): n = len(parinfo)
if (n == 0):
values = default
return(values)
values = []
for i in range(n):
if ((parinfo != None) and (parinfo[i].has_key(key))):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if (type(default) == types.ListType): test=default[0]
if (type(test) == types.IntType):
values = Numeric.asarray(values, Numeric.Int)
elif (type(test) == types.FloatType):
values = Numeric.asarray(values, Numeric.Float)
return(values)
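# Typical PARINFO structure this parser expects (illustrative; one dict per
# parameter, and keys other than the one queried via `key` are ignored here):
#   parinfo = [{'value': 1.0, 'fixed': 0,
#               'limited': [0, 0], 'limits': [0., 0.]} for i in range(5)]
#   fixed = self.parinfo(parinfo, 'fixed', default=0, n=5)  # -> array of five 0s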
## Call user function or procedure, with _EXTRA or not, with
## derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if (self.debug): print 'Entering call...'
if (self.qanytied): x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if (fjac == None):
[status, f] = fcn(x, fjac=fjac, **functkw)
if (self.damp > 0):
## Apply the damping if requested. This replaces the residuals
## with their hyperbolic tangent. Thus residuals larger than
## DAMP are essentially clipped.
f = Numeric.tanh(f/self.damp)
return([status, f])
else:
return(fcn(x, fjac=fjac, **functkw))
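# Effect of the DAMP option above (illustrative): residuals are mapped through
# tanh(f/damp), so values much larger than DAMP saturate near +/-1:
#   Numeric.tanh(Numeric.array([0.1, 1.0, 5.0]) / 1.0)  # ~[0.0997, 0.7616, 0.9999]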
def enorm(self, vec):
if (self.debug): print 'Entering enorm...'
## NOTE: it turns out that, for systems that have a lot of data
## points, this routine is a big computing bottleneck. The extended
## computations that need to be done cannot be effectively
## vectorized. The introduction of the FASTNORM configuration
## parameter allows the user to select a faster routine, which is
## based on TOTAL() alone.
# Very simple-minded sum-of-squares
if (self.fastnorm):
ans = Numeric.sqrt(Numeric.sum(vec*vec))
else:
agiant = self.machar.rgiant / len(vec)
adwarf = self.machar.rdwarf * len(vec)
## This is hopefully a compromise between speed and robustness.
## Need to do this because of the possibility of over- or underflow.
mx = max(vec)
mn = min(vec)
mx = max(abs(mx), abs(mn))
if mx == 0: return(vec[0]*0.)
if mx > agiant or mx < adwarf:
ans = mx * Numeric.sqrt(Numeric.sum((vec/mx)*(vec/mx)))
else:
ans = Numeric.sqrt(Numeric.sum(vec*vec))
return(ans)
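# Why the rescaling above matters (illustrative): a naive sum of squares can
# overflow long before the true norm does, e.g. for v = [1e200, 1e200]:
#   sqrt(sum(v*v))                        -> inf      (v*v overflows to 1e400)
#   mx = 1e200; mx*sqrt(sum((v/mx)**2))   -> 1.414e200 (correct)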
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if (self.debug): print 'Entering fdjac2...'
machep = self.machar.machep
if epsfcn == None: epsfcn = machep
if xall == None: xall = x
if ifree == None: ifree = Numeric.arange(len(xall))
if step == None: step = x * 0.
nall = len(xall)
eps = Numeric.sqrt(max([epsfcn, machep]))
m = len(fvec)
n = len(x)
## Compute analytical derivative if requested
if (autoderivative == 0):
mperr = 0
fjac = Numeric.zeros(nall, Numeric.Float)
Numeric.put(fjac, ifree, 1.0) ## Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print 'ERROR: Derivative matrix was not computed properly.'
return(None)
## This definition is consistent with CURVEFIT
## Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
fjac.shape = [m,nall]
fjac = -fjac
## Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return(fjac)
fjac = Numeric.zeros([m, n], Numeric.Float)
h = eps * abs(x)
## if STEP is given, use that
if step != None:
stepi = Numeric.take(step, ifree)
wh = Numeric.nonzero(stepi > 0)
if (len(wh) > 0): Numeric.put(h, wh, Numeric.take(stepi, wh))
## if relative step is given, use that
if (len(dstep) > 0):
dstepi = Numeric.take(dstep, ifree)
wh = Numeric.nonzero(dstepi > 0)
if len(wh) > 0: Numeric.put(h, wh, abs(Numeric.take(dstepi,wh)*Numeric.take(x,wh)))
## In case any of the step values are zero
wh = Numeric.nonzero(h == 0)
if len(wh) > 0: Numeric.put(h, wh, eps)
## Reverse the sign of the step if we are up against the parameter
## limit, or if the user requested it.
mask = dside == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = mask | (ulimited & (x > ulimit-h))  ## elementwise, not Python and/or
wh = Numeric.nonzero(mask)
if len(wh) > 0: Numeric.put(h, wh, -Numeric.take(h, wh))
## Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if (status < 0): return(None)
if abs(dside[j]) <= 1:
## COMPUTE THE ONE-SIDED DERIVATIVE
## Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fvec)/h[j]
else:
## COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if (status < 0): return(None)
## Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fm)/(2*h[j])
return(fjac)
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident ## identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 ## extract reflector
# hh = hh ## (ident - 2*(v # v)/total(v * v)) ## generate matrix
# endfor
#
# Test the result:
# IDL> print, hh ## transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
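# A quick cross-check of the same decomposition with scipy's pivoted QR
# (illustrative only; MPFIT itself never calls scipy):
#   import numpy as np
#   from scipy.linalg import qr
#   a = np.array([[9., 4.], [2., 8.], [6., 7.]])
#   q, r, piv = qr(a, pivoting=True)      # decomposes a[:, piv] = q @ r
#   assert np.allclose(a[:, piv], q @ r)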
def qrfac(self, a, pivot=0):
if (self.debug): print 'Entering qrfac...'
machep = self.machar.machep
sz = Numeric.shape(a)
m = sz[0]
n = sz[1]
## Compute the initial column norms and initialize arrays
acnorm = Numeric.zeros(n, Numeric.Float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = Numeric.arange(n)
## Reduce a to r with householder transformations
minmn = min([m,n])
for j in range(minmn):
if (pivot != 0):
## Bring the column of largest norm into the pivot position
rmax = max(rdiag[j:minmn])
kmax = Numeric.nonzero(rdiag[j:] == rmax)
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
## Exchange rows via the pivot only. Avoid actually exchanging
## the rows, in case there is lots of memory transfer. The
## exchange occurs later, within the body of MPFIT, after the
## extraneous columns of the matrix have been shed.
if kmax != j:
temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
## Compute the householder transformation to reduce the jth
## column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0: break
if a[j,lj] < 0: ajnorm = -ajnorm  ## use the pivoted column
ajj = ajj / ajnorm
ajj[0] = ajj[0] + 1
## *** Note optimization a(j:*,j)
a[j:,lj] = ajj
## Apply the transformation to the remaining columns
## and update the norms
## NOTE to SELF: tried to optimize this by removing the loop,
## but it actually got slower. Reverted to "for" loop to keep
## it simple.
if (j+1 < n):
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
## *** Note optimization a(j:*,lk)
## (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * Numeric.sum(ajk*ajj)/a[j,lj]
if ((pivot != 0) and (rdiag[k] != 0)):
temp = a[j,lk]/rdiag[k]
rdiag[k] = rdiag[k] * Numeric.sqrt(max((1.-temp**2), 0.))
temp = rdiag[k]/wa[k]
if ((0.05*temp*temp) <= machep):
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return([a, ipvt, rdiag, acnorm])
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
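# Dense equivalent of the problem qrsolv solves (illustrative): the stacked
# least-squares system  min || [a; d] x - [b; 0] ||, e.g. with numpy:
#   import numpy as np
#   A_aug = np.vstack([a, np.diag(d)])
#   b_aug = np.concatenate([b, np.zeros(len(d))])
#   x = np.linalg.lstsq(A_aug, b_aug, rcond=None)[0]
# qrsolv recovers the same x from the already-computed QR factors of a.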
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if (self.debug): print 'Entering qrsolv...'
sz = Numeric.shape(r)
m = sz[0]
n = sz[1]
## copy r and (q transpose)*b to preserve input and initialize s.
## in particular, save the diagonal elements of r in x.
for j in range(n):
r[j:n,j] = r[j,j:n]
x = Numeric.diagonal(r)
wa = qtb.copy()
## Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if (diag[l] == 0): break
sdiag[j:] = 0
sdiag[j] = diag[l]
## The transformations to eliminate the row of d modify only a
## single element of (q transpose)*b beyond the first n, which
## is initially zero.
qtbpj = 0.
for k in range(j,n):
if (sdiag[k] == 0): break
if (abs(r[k,k]) < abs(sdiag[k])):
cotan = r[k,k]/sdiag[k]
sine = 0.5/Numeric.sqrt(.25 + .25*cotan*cotan)
cosine = sine*cotan
else:
tang = sdiag[k]/r[k,k]
cosine = 0.5/Numeric.sqrt(.25 + .25*tang*tang)
sine = cosine*tang
## Compute the modified diagonal element of r and the
## modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
## Accumulate the transformation in the row of s
if (n > k+1):
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
## Solve the triangular system for z. If the system is singular
## then obtain a least squares solution
nsing = n
wh = Numeric.nonzero(sdiag == 0)
if (len(wh) > 0):
nsing = wh[0]
wa[nsing:] = 0
if (nsing >= 1):
wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] ## Degenerate case
## *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum = Numeric.sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = (wa[j]-sum)/sdiag[j]
## Permute the components of z back to components of x
Numeric.put(x, ipvt, wa)
return(r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
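# In effect, lmpar performs a safeguarded root search on the scalar function
# phi(par) = ||d*x(par)|| - delta, where x(par) solves the regularized normal
# equations. A dense (illustrative) evaluation of phi for a trial par:
#   import numpy as np
#   def phi(par, A, d, b, delta):
#       D = np.diag(d)
#       x = np.linalg.solve(A.T @ A + par * D.T @ D, A.T @ b)
#       return np.linalg.norm(D @ x) - delta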
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if (self.debug): print 'Entering lmpar...'
dwarf = self.machar.minnum
sz = Numeric.shape(r)
m = sz[0]
n = sz[1]
## Compute and store in x the gauss-newton direction. If the
## jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
wh = Numeric.nonzero(Numeric.diagonal(r) == 0)
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing > 1:
## *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = wa1[j]/r[j,j]
if (j-1 >= 0):
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
## Note: ipvt here is a permutation array
Numeric.put(x, ipvt, wa1)
## Initialize the iteration counter. Evaluate the function at the
## origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if (fp <= 0.1*delta):
return[r, 0., x, sdiag]
## If the jacobian is not rank deficient, the newton step provides a
## lower bound, parl, for the zero of the function. Otherwise set
## this bound to zero.
parl = 0.
if nsing >= n:
wa1 = Numeric.take(diag, ipvt)*Numeric.take(wa2, ipvt)/dxnorm
wa1[0] = wa1[0] / r[0,0] ## Degenerate case
for j in range(1,n): ## Note "1" here, not zero
sum = Numeric.sum(r[0:j,j]*wa1[0:j])
wa1[j] = (wa1[j] - sum)/r[j,j]
temp = self.enorm(wa1)
parl = ((fp/delta)/temp)/temp
## Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum = Numeric.sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = sum/diag[ipvt[j]]
gnorm = self.enorm(wa1)
paru = gnorm/delta
if paru == 0: paru = dwarf/min([delta,0.1])
## If the input par lies outside of the interval (parl,paru), set
## par to the closer endpoint
par = max([par,parl])
par = min([par,paru])
if par == 0: par = gnorm/dxnorm
## Beginning of an iteration
while(1):
iter = iter + 1
## Evaluate the function at the current value of par
if par == 0: par = max([dwarf, paru*0.001])
temp = Numeric.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if ((abs(fp) <= 0.1*delta) or
((parl == 0) and (fp <= temp) and (temp < 0)) or
(iter == 10)): break;
## Compute the newton correction
wa1 = Numeric.take(diag, ipvt)*Numeric.take(wa2, ipvt)/dxnorm
for j in range(n-1):
wa1[j] = wa1[j]/sdiag[j]
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = wa1[n-1]/sdiag[n-1] ## Degenerate case
temp = self.enorm(wa1)
parc = ((fp/delta)/temp)/temp
## Depending on the sign of the function, update parl or paru
if fp > 0: parl = max([parl,par])
if fp < 0: paru = min([paru,par])
## Compute an improved estimate for par
par = max([parl, par+parc])
## End of an iteration
## Termination
return[r, par, x, sdiag]
## Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if (self.debug): print 'Entering tie...'
if (ptied == None): return(p)
for i in range(len(ptied)):
if ptied[i] == '': continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return(p)
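# Example of tied parameters (illustrative): with
#   ptied = ['', 'p[0] * 2', '']
# each call to tie() re-evaluates p[1] = p[0] * 2, so parameter 1 always
# tracks twice parameter 0 while parameters 0 and 2 vary freely.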
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
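# Dense equivalent (illustrative): the covariance matrix is inverse(a^T a),
# which can be rebuilt from R and the permutation matrix P of the pivoted QR:
#   import numpy as np
#   cov = np.linalg.inv(a.T @ a)
#   cov_from_r = P @ np.linalg.inv(r.T @ r) @ P.T   # same matrix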
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if (self.debug): print 'Entering calc_covar...'
if Numeric.rank(rr) != 2:
print 'ERROR: r must be a two-dimensional matrix'
return(-1)
s = Numeric.shape(rr)
n = s[0]
if s[0] != s[1]:
print 'ERROR: r must be a square matrix'
return(-1)
if (ipvt == None): ipvt = Numeric.arange(n)
r = rr.copy()
r.shape = [n,n]
## Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * abs(r[0,0])
for k in range(n):
if (abs(r[k,k]) <= tolr): break
r[k,k] = 1./r[k,k]
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
## Form the full upper triangle of the inverse of (r transpose)*r
## in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
## Form the full lower triangle of the covariance matrix
## in the strict lower triangle of r and in wa
wa = Numeric.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing: r[i,j] = 0.
ii = ipvt[i]
if ii > jj: r[ii,jj] = r[i,j]
if ii < jj: r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
## Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return(r)
class machar:
def __init__(self, double=1):
if (double == 0):
self.machep = 1.19209e-007
self.maxnum = 3.40282e+038
self.minnum = 1.17549e-038
self.maxgam = 171.624376956302725
else:
self.machep = 2.2204460e-016
self.maxnum = 1.7976931e+308
self.minnum = 2.2250739e-308
self.maxgam = 171.624376956302725
self.maxlog = Numeric.log(self.maxnum)
self.minlog = Numeric.log(self.minnum)
self.rdwarf = Numeric.sqrt(self.minnum*1.5) * 10
self.rgiant = Numeric.sqrt(self.maxnum) * 0.1
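# Minimal usage sketch (illustrative; assumes the standard mpfit(fcn, p0,
# functkw=...) call signature, with the user function returning
# [status, residuals]):
if __name__ == '__main__':
    def residuals(p, fjac=None, x=None, y=None, err=None):
        model = p[0] + p[1] * x
        return [0, (y - model) / err]
    x = Numeric.arange(10.)
    y = 3. + 2. * x
    err = x * 0. + 1.
    fit = mpfit(residuals, [1., 1.], functkw={'x': x, 'y': y, 'err': err})
    print 'status:', fit.status, 'params:', fit.params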
|
deapplegate/wtgpipeline
|
mpfit.py
|
Python
|
mit
| 88,908
|
[
"Gaussian"
] |
ff1779f0ef4b6bfe2baf59e4386e37910d9ec14a18415bc4c419f6835b9c066a
|
# Copyright (c) 2017, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from yamboparser import *
import os
class YamboQPDB():
"""
Class to read yambo ndb.QP files
These files describe the quasiparticle states calculated from yambo
Includes the quasi-particle energies, the lifetimes and the Z factors
"""
def __init__(self,filename='ndb.QP',folder='.'):
"""
Read a QP file using the yamboparser
"""
self.folder = folder
self.filename = filename
if os.path.isfile('%s/%s'%(folder,filename)):
self.yfile = YamboFile(filename,folder)
else:
raise ValueError('File %s/%s not found'%(folder,filename))
qps = self.yfile.data
self.qps = qps
self.nqps = len(qps['E'])
self.nkpoints = len(qps['Kpoint'])
#get kpoints
kpts=[]
for nk in xrange(self.nkpoints):
kpts.append(qps['Kpoint'][nk])
self.kpoints = np.array(kpts)
#get nbands
min_band = int(qps['Band'][0])
max_band = int(qps['Band'][0])
for iqp in xrange(self.nqps):
band = int(qps['Band'][iqp])
if min_band > band: min_band = band
if max_band < band: max_band = band
self.min_band = min_band
self.max_band = max_band
self.nbands = max_band-min_band+1
#read the database
self.eigenvalues_qp, self.eigenvalues_dft, self.lifetimes = self.get_qps()
def qp_bs(self,lattice,path,debug=False):
"""
Calculate the quasi-particle band structure
"""
#get full kmesh
kpoints = lattice.red_kpoints
path = np.array(path)
kpoints_rep, kpoints_idx_rep = replicate_red_kmesh(kpoints,repx=range(-1,2),repy=range(-1,2),repz=range(-1,2))
band_indexes = get_path(kpoints_rep,path)
band_kpoints = kpoints_rep[band_indexes]
band_indexes = kpoints_idx_rep[band_indexes]
if debug:
for i,k in zip(band_indexes,band_kpoints):
x,y,z = k
plt.text(x,y,i)
plt.scatter(kpoints_rep[:,0],kpoints_rep[:,1])
plt.plot(path[:,0],path[:,1],c='r')
plt.scatter(band_kpoints[:,0],band_kpoints[:,1])
plt.show()
exit()
#get eigenvalues along the path
#expand the eigenvalues to the full Brillouin zone
energies_qp = self.eigenvalues_qp[lattice.kpoints_indexes]
#energies_qp = self.eigenvalues_qp
#expand the quasiparticle energies to the full Brillouin zone
energies_dft = self.eigenvalues_dft[lattice.kpoints_indexes]
#energies_dft = self.eigenvalues_dft
energies_dft = energies_dft[band_indexes]
energies_qp = energies_qp[band_indexes]
return np.array(band_kpoints), energies_dft, energies_qp
def plot_qp_bs(self,ax,lattice,path,what='DFT,QP',debug=False,label=False,**args):
"""
Plot the quasi-particle band structure
"""
bands_kpoints, energies_dft, energies_qp = self.qp_bs(lattice, path, debug)
#calculate distances
bands_distances = calculate_distances(bands_kpoints)
#make the plots
for b in xrange(self.min_band-1,self.max_band-1):
if 'DFT' in what:
ax.plot(bands_distances, energies_dft[:,b], **args)
if 'QP' in what:
ax.plot(bands_distances, energies_qp[:,b], **args)
if 'DFT' in what:
ax.plot(bands_distances, energies_dft[:,self.max_band-1], label=label, **args)
if 'QP' in what:
ax.plot(bands_distances, energies_qp[:,self.max_band-1], label=label, **args)
#add high-symmetry k-points vertical bars
kpath_car = red_car(path,lattice.rlat)
#calculate distances for high-symmetry points
kpath_distances = calculate_distances( path )
for d in kpath_distances:
ax.axvline(d,c='k')
xmin = np.min(bands_distances)
xmax = np.max(bands_distances)
plt.xlim([xmin,xmax])
return kpath_distances
def get_qps(self):
"""
Get quasiparticle energies in a list
"""
#get dimensions
nqps = self.nqps
nkpts = self.nkpoints
qps = self.qps
kpts = self.kpoints
nbands = int(np.max(qps['Band'][:]))
#start arrays
eigenvalues_dft = np.zeros([nkpts,nbands])
eigenvalues_qp = np.zeros([nkpts,nbands])
lifetimes = np.zeros([nkpts,nbands])
for iqp in xrange(nqps):
kindx = int(qps['Kpoint_index'][iqp])
e = qps['E'][iqp]*ha2ev
e0 = qps['Eo'][iqp]*ha2ev
band = int(qps['Band'][iqp])
kpt = ("%8.4lf "*3)%tuple(kpts[kindx-1])
Z = qps['Z'][iqp]
eigenvalues_qp[kindx-1,band-1] = e.real
eigenvalues_dft[kindx-1,band-1] = e0.real
lifetimes[kindx-1,band-1] = e.imag
return eigenvalues_qp, eigenvalues_dft, lifetimes
def __str__(self):
s = ""
s += "nqps: %d\n"%self.nqps
s += "nkpoints: %d\n"%self.nkpoints
s += "nbands: %d\n"%self.nbands
return s
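if __name__ == "__main__":
    # Minimal usage sketch (illustrative; requires an ndb.QP file written by yambo)
    yqp = YamboQPDB(filename='ndb.QP', folder='.')
    print yqp
    eqp, edft, lifetimes = yqp.get_qps()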
|
henriquemiranda/yambopy
|
yambopy/dbs/qpdb.py
|
Python
|
bsd-3-clause
| 5,337
|
[
"Yambo"
] |
6a68fa5fe0e9b688b1dfaca7530d517b8f9eac3476fa4d0d9a543342f439f3a8
|
#!/usr/bin/python
"""
# Created on Aug 12, 2016
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com) GitHub ID: grastogi23
#
# module_check: not supported
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_api_session
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Avi API Module
description:
- This module can be used for calling any resources defined in Avi REST API. U(https://avinetworks.com/)
- This module is useful for invoking HTTP Patch methods and accessing resources that do not have a REST object associated with them.
version_added: 2.3
requirements: [ avisdk ]
options:
http_method:
description:
- Allowed HTTP methods for RESTful services and are supported by Avi Controller.
choices: ["get", "put", "post", "patch", "delete"]
required: true
data:
description:
- HTTP body in YAML or JSON format.
params:
description:
- Query parameters passed to the HTTP API.
path:
description:
- 'Path for Avi API resource. For example, C(path: virtualservice) will translate to C(api/virtualservice).'
timeout:
description:
- Timeout (in seconds) for Avi API calls.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Get Pool Information using avi_api_session
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: pool
params:
name: "{{ pool_name }}"
register: pool_results
- name: Patch Pool with list of servers
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: patch
path: "{{ pool_path }}"
data:
add:
servers:
- ip:
addr: 10.10.10.10
type: V4
- ip:
addr: 20.20.20.20
type: V4
register: updated_pool
- name: Fetch Pool metrics bandwidth and connections rate
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: analytics/metrics/pool
params:
name: "{{ pool_name }}"
metric_id: l4_server.avg_bandwidth,l4_server.avg_complete_conns
step: 300
limit: 10
register: pool_metrics
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from copy import deepcopy
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, ansible_return, HAS_AVI)
from avi.sdk.avi_api import ApiSession
from avi.sdk.utils.ansible_utils import avi_obj_cmp, cleanup_absent_fields
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
http_method=dict(required=True,
choices=['get', 'put', 'post', 'patch',
'delete']),
path=dict(type='str', required=True),
params=dict(type='dict'),
data=dict(type='jsonarg'),
timeout=dict(type='int', default=60)
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(argument_spec=argument_specs)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
tenant_uuid = module.params.get('tenant_uuid', None)
api = ApiSession.get_session(
module.params['controller'], module.params['username'],
module.params['password'], tenant=module.params['tenant'],
tenant_uuid=tenant_uuid)
tenant = module.params.get('tenant', '')
timeout = int(module.params.get('timeout'))
# path is a required argument
path = module.params.get('path', '')
params = module.params.get('params', None)
data = module.params.get('data', None)
if data is not None:
data = json.loads(data)
method = module.params['http_method']
existing_obj = None
changed = method != 'get'
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
if method == 'post':
# need to check if object already exists. In that case
# change the method to be put
gparams['name'] = data['name']
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
try:
existing_obj = rsp.json()['results'][0]
except IndexError:
# object is not found
pass
else:
# object is present
method = 'put'
path += '/' + existing_obj['uuid']
if method == 'put':
# put can happen with when full path is specified or it is put + post
if existing_obj is None:
using_collection = False
if (len(path.split('/')) == 1) and ('name' in data):
gparams['name'] = data['name']
using_collection = True
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
rsp_data = rsp.json()
if using_collection:
if rsp_data['results']:
existing_obj = rsp_data['results'][0]
path += '/' + existing_obj['uuid']
else:
method = 'post'
else:
if rsp.status_code == 404:
method = 'post'
else:
existing_obj = rsp_data
if existing_obj:
changed = not avi_obj_cmp(data, existing_obj)
cleanup_absent_fields(data)
if method == 'patch':
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
existing_obj = rsp.json()
if (method == 'put' and changed) or (method != 'put'):
fn = getattr(api, method)
rsp = fn(path, tenant=tenant, tenant_uuid=tenant_uuid, timeout=timeout,
params=params, data=data)
else:
rsp = None
if method == 'delete' and rsp.status_code == 404:
changed = False
rsp.status_code = 200
if method == 'patch' and existing_obj and rsp.status_code < 299:
# Ideally the comparison should happen with the return values
# from the patch API call. However, currently Avi API are
# returning different hostname when GET is used vs Patch.
# tracked as AV-12561
if path.startswith('pool'):
time.sleep(1)
gparams = deepcopy(params) if params else {}
gparams.update({'include_refs': '', 'include_name': ''})
rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
params=gparams)
new_obj = rsp.json()
changed = not avi_obj_cmp(new_obj, existing_obj)
if rsp is None:
return module.exit_json(changed=changed, obj=existing_obj)
return ansible_return(module, rsp, changed, req=data)
if __name__ == '__main__':
main()
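# Summary of the method-rewriting logic in main() above (illustrative):
#   post   + object already exists           -> rewritten to put on <path>/<uuid>
#   put    + collection path + name in data  -> put on <path>/<uuid> if found,
#                                               otherwise rewritten to post
#   delete + 404 response                    -> reported as unchanged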
|
t0mk/ansible
|
lib/ansible/modules/network/avi/avi_api_session.py
|
Python
|
gpl-3.0
| 8,037
|
[
"VisIt"
] |
52325d392375edd6d97cd9b2b7e9a6f197cd9adba9639a5025e01d3c0fda1b85
|
'''
Conversion of basis sets to Q-Chem format
'''
from .. import lut, manip, sort, printing
def _determine_pure(basis):
# starts at d shells
pure = {}
for eldata in basis['elements'].values():
if 'electron_shells' not in eldata:
continue
for sh in eldata['electron_shells']:
for shell_am in sh['angular_momentum']:
harm = '2' # cartesian
if 'spherical' in sh['function_type']:
harm = '1'
if shell_am in pure:
pure[shell_am] = harm if harm == '1' else pure[shell_am]
else:
pure[shell_am] = harm
pure_list = sorted(pure.items(), reverse=True)
pure_list = pure_list[:-2] # Trim s & p
return ''.join(x[1] for x in pure_list)
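# Example (illustrative): for a basis whose d shells are all spherical and
# whose f shells are cartesian, the d entry is '1' and the f entry is '2';
# sorting high-to-low and trimming the s and p entries leaves
# [(3, '2'), (2, '1')], i.e. PURECART "21".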
def write_qchem(basis):
'''Converts a basis set to Q-Chem
Q-Chem is basically gaussian format, wrapped in $basis/$end
This also outputs the PURECART variable of the $rem block
'''
s = ''
basis = manip.uncontract_general(basis, True)
basis = manip.uncontract_spdf(basis, 1, False)
basis = sort.sort_basis(basis, False)
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
purecart = _determine_pure(basis)
if purecart != '':
s += "$rem\n"
if electron_elements:
s += " BASIS GEN\n"
if ecp_elements:
s += " ECP GEN\n"
s += " PURECART " + _determine_pure(basis) + "\n"
s += "$end\n\n"
# Electron Basis
if electron_elements:
s += "$basis\n"
for z in electron_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, True)
s += '{} 0\n'.format(sym)
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
nprim = len(exponents)
am = shell['angular_momentum']
amchar = lut.amint_to_char(am, hij=True).upper()
s += '{} {} 1.00\n'.format(amchar, nprim)
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=True)
s += '****\n'
s += "$end\n"
# Write out ECP
if ecp_elements:
s += "\n\n$ecp\n"
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z).upper()
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
max_ecp_amchar = lut.amint_to_char([max_ecp_am], hij=True)
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += '{} 0\n'.format(sym)
s += '{}-ECP {} {}\n'.format(sym, max_ecp_am, data['ecp_electrons'])
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
nprim = len(rexponents)
am = pot['angular_momentum']
amchar = lut.amint_to_char(am, hij=True)
if am[0] == max_ecp_am:
s += '{} potential\n'.format(amchar)
else:
s += '{}-{} potential\n'.format(amchar, max_ecp_amchar)
s += ' ' + str(nprim) + '\n'
point_places = [0, 9, 32]
s += printing.write_matrix([rexponents, gexponents, *coefficients], point_places, convert_exp=True)
s += '****\n'
s += "$end\n"
return s
|
MOLSSI-BSE/basis_set_exchange
|
basis_set_exchange/writers/qchem.py
|
Python
|
bsd-3-clause
| 4,106
|
[
"Gaussian",
"Q-Chem"
] |
fda17bef87058a9e14f9880271aa8466bdd770029e6e5ff13615d05c78262bdd
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
# Copyright 2010 Joanmarie Diggs
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for rhythmbox."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc. " \
"Copyright (c) 2010 Joanmarie Diggs"
__license__ = "LGPL"
import pyatspi
import orca.scripts.default as default
import orca.orca as orca
import orca.orca_state as orca_state
from speech_generator import SpeechGenerator
from braille_generator import BrailleGenerator
from formatting import Formatting
class Script(default.Script):
def __init__(self, app):
"""Creates a new script for the given application.
Arguments:
- app: the application to create a script for.
"""
default.Script.__init__(self, app)
def getBrailleGenerator(self):
"""Returns the braille generator for this script.
"""
return BrailleGenerator(self)
def getSpeechGenerator(self):
"""Returns the speech generator for this script.
"""
return SpeechGenerator(self)
def getFormatting(self):
"""Returns the formatting strings for this script."""
return Formatting(self)
def adjustTableCell(self, obj):
# Check to see if this is a table cell from the Library table.
# If so, it'll have five children and we are interested in the
# penultimate one. See bug #512639 for more details.
#
if obj.childCount == 5:
return obj[3]
else:
return obj
def onActiveDescendantChanged(self, event):
"""Called when an object who manages its own descendants detects a
change in one of its children. Overridden here because the table
on the left-hand side lacks STATE_FOCUSED which causes the default
script to reject this event.
Arguments:
- event: the Event
"""
child = event.any_data
if child:
orca.setLocusOfFocus(event, child)
else:
orca.setLocusOfFocus(event, event.source)
# We'll tuck away the activeDescendant information for future
# reference since the AT-SPI gives us little help in finding
# this.
#
if orca_state.locusOfFocus \
and (orca_state.locusOfFocus != event.source):
self.pointOfReference['activeDescendantInfo'] = \
[orca_state.locusOfFocus.parent,
orca_state.locusOfFocus.getIndexInParent()]
def onFocus(self, event):
"""Called whenever an object gets focus. Overridden here because a
page tab keeps making bogus focus claims when the user is in the
tree on the left-hand side.
Arguments:
- event: the Event
"""
if event.source.getRole() == pyatspi.ROLE_PAGE_TAB \
and not event.source.name and orca_state.locusOfFocus \
and orca_state.locusOfFocus.getRole() == pyatspi.ROLE_TABLE_CELL:
return
default.Script.onFocus(self, event)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/rhythmbox/script.py
|
Python
|
gpl-3.0
| 3,823
|
[
"ORCA"
] |
703127db326a78cf8c103621524a1586ecd6787c466d995f09562597eb2045ca
|
from __future__ import print_function
import numpy as np
import pandas as pd
import astropy.units as u
from math import copysign
from astropy.wcs import WCS
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import SkyCoord
class KbmodInfo(object):
"""
Hold and process the information describing the input data
and results from a KBMOD search.
"""
def __init__(self, results_filename, image_filename,
visit_list, visit_mjd, results_visits, observatory):
"""
Read in the output from a KBMOD search and store as a pandas
DataFrame.
Take in a list of visits and times for those visits.
Parameters
----------
results_filename: str
The filename of the kbmod search results.
image_filename: str
The filename of the first image used in the kbmod search.
visit_list: numpy array
An array with all possible visit numbers in the search fields.
visit_mjd: numpy array
An array of the corresponding times in MJD for the visits listed
in `visit_list`.
results_visits: list
A list of the visits actually searched by kbmod for the given
results file.
observatory: str
The three character observatory code for the data searched.
"""
self.visit_df = pd.DataFrame(visit_list,
columns=['visit_num'])
self.visit_df['visit_mjd'] = visit_mjd
results_array = np.genfromtxt(results_filename)
# Only keep values and not property names from results file
if len(np.shape(results_array)) == 1:
results_proper = [results_array[1::2]]
elif len(np.shape(results_array)) > 1:
results_proper = results_array[:, 1::2]
self.results_df = pd.DataFrame(results_proper,
columns=['lh', 'flux', 'x0', 'y0',
'x_v', 'y_v', 'obs_count'])
image_fits = fits.open(image_filename)
self.wcs = WCS(image_fits[1].header)
self.results_visits = results_visits
self.results_mjd = self.visit_df[self.visit_df['visit_num'].isin(self.results_visits)]['visit_mjd'].values
self.mjd_0 = self.results_mjd[0]
self.obs = observatory
@staticmethod
def mpc_reader(filename):
"""
Read in a file with observations in MPC format and return the coordinates.
Parameters
----------
filename: str
The name of the file with the MPC-formatted observations.
Returns
-------
coords: astropy SkyCoord object
A SkyCoord object with the ra, dec of the observations.
times: astropy Time object
Times of the observations
"""
iso_times = []
time_frac = []
ra = []
dec = []
with open(filename, 'r') as f:
for line in f:
year = str(line[15:19])
month = str(line[20:22])
day = str(line[23:25])
iso_times.append(str('%s-%s-%s' % (year,month,day)))
time_frac.append(str(line[25:31]))
ra.append(str(line[32:44]))
dec.append(str(line[44:56]))
coords = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
t = Time(iso_times)
t_obs = []
for t_i, frac in zip(t, time_frac):
t_obs.append(t_i.mjd + float(frac))
obs_times = Time(t_obs, format='mjd')
return coords, obs_times
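# Schematic of the fixed columns sliced above (illustrative; see the MPC
# 80-column observation format for the authoritative layout):
#   line[15:19] -> year            line[20:22] -> month
#   line[23:25] -> day             line[25:31] -> fractional day
#   line[32:44] -> RA (hms)        line[44:56] -> Dec (dms)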
def get_searched_radec(self, obj_idx):
"""
This will take an image and use its WCS to calculate the
ra, dec locations of the object in the searched data.
Parameters
----------
obj_idx: int
The index of the object in the KBMOD results for which
we want to calculate orbital elements/predictions.
"""
self.result = self.results_df.iloc[obj_idx]
zero_times = self.results_mjd - self.mjd_0
pix_coords_x = self.result['x0'] + \
self.result['x_v']*zero_times
pix_coords_y = self.result['y0'] + \
self.result['y_v']*zero_times
ra, dec = self.wcs.all_pix2world(pix_coords_x, pix_coords_y, 1)
self.coords = SkyCoord(ra*u.deg, dec*u.deg)
def format_results_mpc(self):
"""
This method will take in a row from the results file and output the
astrometry of the object in the searched observations into a file with
MPC formatting.
Returns
-------
mpc_lines: list of strings
List where each entry is an observation as an MPC-formatted string
"""
field_times = Time(self.results_mjd, format='mjd')
mpc_lines = []
for t, c in zip(field_times, self.coords):
mjd_frac = t.mjd % 1.0
ra_hms = c.ra.hms
dec_dms = c.dec.dms
if dec_dms.d != 0:
name = (" c111112 c%4i %02i %08.5f %02i %02i %06.3f%+03i %02i %05.2f %s" %
(t.datetime.year, t.datetime.month, t.datetime.day+mjd_frac,
ra_hms.h, ra_hms.m, ra_hms.s,
dec_dms.d, np.abs(dec_dms.m), np.abs(dec_dms.s), self.obs))
else:
if copysign(1, dec_dms.d) == -1.0:
dec_dms_d = '-00'
else:
dec_dms_d = '+00'
name = (" c111112 c%4i %02i %08.5f %02i %02i %06.3f%s %02i %05.2f %s" %
(t.datetime.year, t.datetime.month, t.datetime.day+mjd_frac,
ra_hms.h, ra_hms.m, ra_hms.s,
dec_dms_d, np.abs(dec_dms.m), np.abs(dec_dms.s), self.obs))
mpc_lines.append(name)
return(mpc_lines)
def save_results_mpc(self, file_out):
"""
Save the MPC-formatted observations to file.
Parameters
----------
file_out: str
The output filename with the MPC-formatted observations
of the KBMOD search result. If None, then it will save
the output as 'kbmod_mpc.dat' and will be the default
file in other methods below where file_in=None.
"""
mpc_lines = self.format_results_mpc()
with open(file_out, 'w') as f:
for obs in mpc_lines:
f.write(obs + '\n')
|
DiracInstitute/kbmod
|
analysis/kbmod_info.py
|
Python
|
bsd-2-clause
| 7,455
|
[
"VisIt"
] |
e7f7188e0f43a8c52cb86b716ad52a7301dd670201894d405ddb5b668487bb2b
|
# utility functions for frequency related stuff
import numpy as np
import math
import scipy.signal as signal
import scipy.interpolate as interp
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
# import utils
from utilsIO import *
###########################
### SMOOTHING FUNCTIONS
###########################
def smooth1d(x, winLen=11, window='hanning'):
"""
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(winLen/2-1):-(winLen/2)] instead of just y.
"""
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < winLen:
        raise ValueError("Input vector needs to be bigger than window size.")
    if winLen < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'cosine', 'parzen']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'cosine', 'parzen'")
s = np.pad(x, (winLen, winLen), mode="edge")
    if window == 'flat':  # moving average
        w = np.ones(winLen, 'd')
    else:
        w = getattr(signal, window)(winLen)
    # offset into the full convolution so the output aligns with the input
    off = winLen + (winLen - 1) // 2
    if winLen % 2 == 0:
        off = winLen + winLen // 2
y = np.convolve(s, w/w.sum(), mode='full')
return y[off:off+x.size]
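# Hedged usage sketch for smooth1d (illustrative values): thanks to the edge
# padding above, the output has the same length as the input.
#
#     t = np.linspace(0, 1, 500)
#     noisy = np.sin(2*np.pi*5*t) + 0.2*np.random.randn(500)
#     smoothed = smooth1d(noisy, winLen=21, window='hanning')
#     assert smoothed.size == noisy.size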
def smooth2d(x, winLen=(11, 11), window='hanning'):
# winLen[0] is smoothing along windows
# winLen[1] is smoothing in a single window
kernel = np.outer(signal.hanning(winLen[0], 8), signal.gaussian(winLen[1], 8))
# pad to help the boundaries
padded = np.pad(x, ((winLen[0], winLen[0]),(winLen[1], winLen[1])), mode="edge")
# 2d smoothing
blurred = signal.fftconvolve(padded, kernel, mode='same')
return blurred[winLen[0]:winLen[0]+x.shape[0], winLen[1]:winLen[1]+x.shape[1]]
###########################
### STANDARD FILTERING
### AND DECIMATION
###########################
def downsampleTime(data, downsampleFactor):
# if downsampleFactor is 1, nothing to do
if downsampleFactor == 1:
return data
# a downsample factor should not be greater than 13
# hence factorise downsampleFactors that are greater than this
if downsampleFactor > 13:
downsamples = factorise(downsampleFactor)
generalPrint("Decimation", "Downsample factor {} greater than 13. Downsampling will be performed in multiple steps of {}".format(
downsampleFactor, arrayToStringInt(downsamples)))
else:
downsamples = [downsampleFactor]
# downsample for each factor in downsamples
for factor in downsamples:
for c in data:
data[c] = signal.decimate(data[c], factor, zero_phase=True)
return data
def factorise(number):
import primefac
factors = list(primefac.primefac(number))
downsamples = []
# there's a few pathological cases here that are being ignored
# what if the downsample factor is the product of two primes greater than 13
# let's ignore this situation for the time being
val = 1
for f in factors:
test = val*f
if test > 13:
downsamples.append(val)
val = 1
val = val*f
# logic: on the last value of f, val*f is tested
# if this is greater than 13, the previous val is added, which leaves one factor leftover
# if not greater than 13, then this is not added either
# so append the last value. the only situation in which this fails is the last factor itself is over 13.
downsamples.append(val)
return downsamples
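# Hedged example of the factorisation above: a downsample factor of 80
# (prime factors 2*2*2*2*5) is split into steps that are each at most 13.
#
#     factorise(80) # -> [8, 10], and 8 * 10 == 80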
def resample(data, fs, fsNew):
# resample the data using the polyphase method which does not assume periodicity
# need to calculate the upsample and then the downsample
# using polyphase filtering, the final sample rate = up / down * original sample rate
# need to calculate up and down
from fractions import Fraction
    frac = Fraction(1.0/fs).limit_denominator()  # because this is most probably a float
    frac = Fraction(frac*int(fsNew))
    frac = frac.limit_denominator()  # Fraction is immutable, so reassign the reduced value
# now do the resampling
# if frac.numerator == 1:
# # then decimate instead of resample
# return downsampleTime(data, frac.denominator)
# otherwise, normal polyphase filtering
resampleData = {}
for c in data:
resampleData[c] = signal.resample_poly(data[c], frac.numerator, frac.denominator)
return resampleData
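# Hedged usage sketch for resample (illustrative rates): 128 Hz -> 100 Hz
# reduces to Fraction(25, 32), i.e. signal.resample_poly(..., 25, 32).
#
#     data = {'Ex': np.random.randn(1280)}
#     out = resample(data, fs=128.0, fsNew=100.0)
#     # out['Ex'].size == 1000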
# lowpass butterworth filter
def lpFilter(data, fs, cutoff, order=5):
# create the filter
normalisedCutoff = 2.0*cutoff/fs
b, a = signal.butter(order, normalisedCutoff, btype="lowpass", analog=False)
# filter each channel
return filterData(data, b, a)
# highpass butterworth filter
def hpFilter(data, fs, cutoff, order=5):
# create the filter
normalisedCutoff = 2.0*cutoff/fs
b, a = signal.butter(order, normalisedCutoff, btype="highpass", analog=False)
return filterData(data, b, a)
def bpFilter(data, fs, cutoffLow, cutoffHigh, order=5):
# create the filter
normalisedCutoffLow = 2.0*cutoffLow/fs
normalisedCutoffHigh = 2.0*cutoffHigh/fs
b, a = signal.butter(order, [normalisedCutoffLow, normalisedCutoffHigh], btype="bandpass", analog=False)
return filterData(data, b, a)
def filterData(data, b, a, padLen=10000):
# filter each channel
filteredData = {}
for c in data:
# filteredData[c] = signal.filtfilt(b, a, data[c], method="pad", padtype="odd", padlen=padLen)
filteredData[c] = signal.filtfilt(b, a, data[c], method="gust", irlen=500)
return filteredData
# Notch (band-stop) filter
# Required inputs are as follows:
# data: the data to be filtered (a single array, not a channel dictionary)
# fs: the sampling frequency
# freq: the centreline frequency to be filtered out
# band: the bandwidth around the centreline frequency to remove
# Internally a second-order Bessel IIR filter is used in band-stop mode;
# other scipy.signal.iirfilter types ('butter', 'cheby1', 'cheby2', 'ellip')
# would also work here.
def notchFilter(data, fs, freq, band):
nyq = fs/2.0
low = freq - band/2.0
high = freq + band/2.0
low = low/nyq
high = high/nyq
# some options
#ripple =
order = 2
filter_type = "bessel"
#b, a = signal.iirfilter(order, [low, high], rp=ripple, btype='bandstop', analog=False, ftype=filter_type)
b, a = signal.iirfilter(order, [low, high], btype='bandstop', analog=False, ftype=filter_type)
filteredData = signal.lfilter(b, a, data)
return filteredData
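# Hedged usage sketch for notchFilter (illustrative values): removing 50 Hz
# mains interference with a 2 Hz wide stop band. Note that, unlike the other
# filters here, this one takes a single array rather than a channel dict.
#
#     fs = 500.0
#     t = np.arange(0, 10, 1.0/fs)
#     x = np.sin(2*np.pi*12*t) + np.sin(2*np.pi*50*t)
#     cleaned = notchFilter(x, fs, freq=50.0, band=2.0)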
###########################
### SHIFTING
###########################
# this actually shifts the time by some amount of time
def timeShift(data, fs, shift, **kwargs):
shiftMode = "samples"
if "mode" in kwargs:
shiftMode = kwargs["mode"]
    # TODO: the shift calculation and application are not implemented yet;
    # for now the data is returned unchanged
return data
# need a function to interpolate the sampling so that it coincides with full seconds
# the function also shifts the start point to the next full second
# TODO: this function needs to be more robust for low (< 1Hz) sample frequencies as the use of microseconds and seconds makes no sense for this
# THIS FUNCTION WILL TRUNCATE THE DATA TO THE NEXT SECOND
def interpolateToSecond(fs, startTime, data):
# data properties
    chans = list(data.keys())  # list() so the keys can be indexed under Python 3
    samplePeriod = 1.0/fs
    # set initial vals
    numSamples = data[chans[0]].size
    # now calculate the interpolation
microseconds = startTime.time().microsecond
# check if the dataset already begins on a second
if microseconds == 0:
return startTime, numSamples, data # do nothing, already on the second
# now turn microseconds into a decimal
microseconds = microseconds/1000000.0
# now calculate the number of complete samples till the next second
eps = 0.000000001
test = microseconds
samplesToDrop = 0
    # this loop will always calculate either to the full second or to the next sample past the full second
while test < 1.0 - eps:
test += samplePeriod
samplesToDrop += 1
# if this is exact, i.e. integer number of samples to next second, just need to drop samples
multiple = (1.0 - microseconds)/samplePeriod
if np.absolute(multiple - samplesToDrop) < eps: # floating point arithmetic
dataInterp = {} # create a new dictionary for data
for chan in chans:
dataInterp[chan] = data[chan][samplesToDrop:]
# update the other data
numSamplesInterp = numSamples - samplesToDrop
startTimeInterp = startTime + timedelta(seconds=1.0*samplesToDrop/fs)
return startTimeInterp, numSamplesInterp, dataInterp
# if here, then we have calculated one extra for samplesToDrop
samplesToDrop -= 1
# now the number of samples to the next full second is not an integer
# interpolation will have to be performed
shift = (multiple - samplesToDrop)*samplePeriod
sampleShift = shift/samplePeriod
x = np.arange(0, numSamples)
xInterp = np.arange(samplesToDrop, numSamples - 1) + sampleShift
# calculate return vars
numSamplesInterp = xInterp.size
startTimeInterp = startTime + timedelta(seconds=1.0*samplesToDrop/fs) + timedelta(seconds=shift)
# do the interpolation
dataInterp = {}
for chan in chans:
interpFunc = interp.InterpolatedUnivariateSpline(x, data[chan])
dataInterp[chan] = interpFunc(xInterp)
return startTimeInterp, numSamplesInterp, dataInterp
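# Hedged worked example for interpolateToSecond (illustrative values): with
# fs = 10 Hz and a start time of 10:00:00.300000, the 0.7 s gap to the next
# full second is an exact multiple of the 0.1 s sample period, so 7 samples
# are dropped and no interpolation is needed.
#
#     start = datetime(2020, 1, 1, 10, 0, 0, 300000)
#     data = {'Ex': np.random.randn(1000)}
#     newStart, n, newData = interpolateToSecond(10.0, start, data)
#     # newStart == datetime(2020, 1, 1, 10, 0, 1), n == 993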
###########################
### REMOVE BAD DATA
###########################
def removeZeros(data):
# this function finds a stretch of zeros and tries to fill them in with better data
# i.e. interpolated data or some such
# first, identify zeros
for chan in data:
data[chan] = removeZerosSingle(data[chan])
return data
def removeZerosSingle(data):
eps = 0.000000001 # use this because of floating point precision
# set an x array
x = np.arange(data.size)
# find zero locations
zeroLocs = np.where(np.absolute(data) < eps)[0] # this returns a tuple, take the first index
if len(zeroLocs) == 0:
return data # no zeros to remove
# now want to find consecutive zeros
grouped = groupConsecutive(zeroLocs)
indicesToFix = []
    # now find groups of 20 or more consecutive zeros
    for g in grouped:
        if g.size >= 20:
indicesToFix = indicesToFix + list(g)
# now have the indices we want to fix
# can go about interpolating values there
indicesToFix = np.array(sorted(indicesToFix))
    mask = np.ones(data.size, bool)
mask[indicesToFix] = 0
data[indicesToFix] = np.interp(indicesToFix, x[mask], data[mask])
return data
def removeNans(data):
# find nan in the dataset and removes the values
for chan in data:
data[chan] = removeNansSingle(data[chan])
return data
def removeNansSingle(data):
# set an x array
x = np.arange(data.size)
# find locations of nans - this is a bool array with True in locations with nan values
nanLocs = np.isnan(data)
# if no nans, do nothing
if not np.any(nanLocs):
return data # no nans to remove
# create mask
    mask = np.ones(data.size, bool)
mask[nanLocs] = 0 # using numpy indexing with bool arrays
# no need to group, want to remove every nan
data[nanLocs] = np.interp(x[nanLocs], x[mask], data[mask])
return data
def groupConsecutive(vals, stepsize=1):
"""Return list of consecutive lists of numbers from vals (number list)."""
return np.split(vals, np.where(np.diff(vals) != stepsize)[0]+1)
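# Hedged example for groupConsecutive:
#
#     groupConsecutive(np.array([1, 2, 3, 7, 8, 20]))
#     # -> [array([1, 2, 3]), array([7, 8]), array([20])]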
|
nss350/magPy
|
utils/utilsProcess.py
|
Python
|
apache-2.0
| 11,084
|
[
"Gaussian"
] |
ba33c85802f720e7331b67a5ec03b28aeedb98e5dea7a6546f7e8db624798795
|
# Copyright (C) 2012 David Morton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import numpy
from scipy import integrate
def get_projection_combinations(keys):
pc = list()
for i, j in itertools.product(keys, keys):
if i != j:
s = (i, j)
st = (j, i)
if s not in pc and st not in pc:
pc.append(s)
return pc
def num_projection_combinations(n):
assert n > 0
if n > 2:
return (n-1) + num_projection_combinations(n-1)
elif n == 2:
return 1
elif n == 1:
return 0
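# A hedged example of the two helpers above (illustrative keys):
#
#     get_projection_combinations(['a', 'b', 'c'])
#     # -> [('a', 'b'), ('a', 'c'), ('b', 'c')]
#     num_projection_combinations(3) # -> 3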
def center(cluster):
'''
Returns a vector pointing to the center of a cluster.
'''
return numpy.average(cluster, axis=0)
def get_projection_vector(c1, c2):
'''
Returns the normalized vector pointing from
the center of c1 to the center of c2.
'''
pv = center(c2)-center(c1)
return pv/numpy.linalg.norm(pv)
def projection(c1, c2):
'''
Return the projections of the vectors in c1-center(c1) and
c2-center(c1), onto the vector going from c1 to c2.
'''
cc1 = c1-center(c1)
cc2 = c2-center(c1)
pv = get_projection_vector(c1, c2)
p1 = numpy.dot(cc1, pv)
p2 = numpy.dot(cc2, pv)
return p1, p2
def gaussian(center,peak,width,x):
# simple normal distribution function with center offset
return peak * numpy.exp(-(x-center)**2/width**2)
def normal( mu, std, x ):
# the normal distribution
peak = ( 1/(std*numpy.sqrt(2*numpy.pi)) )
width = numpy.sqrt(2)*std
return gaussian( mu, peak, width, x )
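# A hedged sanity check (illustrative): normal() is a proper probability
# density, so it should integrate to ~1 over a wide enough interval.
#
#     xs = numpy.linspace(-10, 10, 10001)
#     numpy.trapz(normal(0.0, 1.0, xs), xs) # ~= 1.0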
def get_gaussian(projection, xs=[-5, 5, 100]):
'''
Returns a gaussian that describes the distribution
of projections provided.
'''
mu = numpy.average(projection)
std = numpy.std(projection)
x_values = numpy.linspace(*xs)
y_values = normal(mu, std, x_values)
return x_values, y_values
def get_both_gaussians(p1, p2, num_points):
'''
Returns gaussians for the two projection distributions. The
larger of the two will have a peak value of 1.0.
'''
lb, ub = get_bounds(p1, p2)
xs = [lb, ub, num_points]
x, g1y = get_gaussian(p1, xs=xs)
x, g2y = get_gaussian(p2, xs=xs)
g1y *= len(p1)
g2y *= len(p2)
max_y = numpy.max(numpy.hstack([g1y, g2y]))
return x, g1y/max_y, g2y/max_y
def get_min_normal(mus, stds):
return lambda x: min([normal(m, s, x) for m, s in zip(mus, stds)])
def get_bounds(*args):
mus = [numpy.average(p) for p in args]
stds = [numpy.std(p) for p in args]
lower_bounds = [m-8*s for m, s in zip(mus, stds)]
upper_bounds = [m+8*s for m, s in zip(mus, stds)]
lb = min(lower_bounds)
ub = max(upper_bounds)
return lb, ub
def get_overlap(*args):
'''
Return the integral of the overlap of the gaussians
that best describe the supplied data sets.
'''
mus = [numpy.average(p) for p in args]
stds = [numpy.std(p) for p in args]
lb, ub = get_bounds(*args)
return integrate.quad(get_min_normal(mus, stds), lb, ub)[0]
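# Hedged usage sketch for get_overlap (illustrative data): two unit-variance
# samples whose means differ by 3 overlap by roughly 2*Phi(-1.5) ~= 0.13.
#
#     p1 = numpy.random.normal(0.0, 1.0, 10000)
#     p2 = numpy.random.normal(3.0, 1.0, 10000)
#     get_overlap(p1, p2) # ~= 0.13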
|
davidlmorton/spikepy
|
spikepy/common/projection_utils.py
|
Python
|
gpl-3.0
| 3,660
|
[
"Gaussian"
] |
af3b0f411c80f8da72a9f2cd981c6f84ea3ef4a5198db5e26791fa1a7b1f14dc
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-modify-user
# Author : Adrian Casajus
########################################################################
"""
Modify a user in the CS.
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.registerSwitch( "p:", "property=", "Add property to the user <name>=<value>" )
Script.registerSwitch( "f", "force", "create the user if it doesn't exist" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... user DN group [group] ...' % Script.scriptName,
'Arguments:',
' user: User name',
' DN: DN of the User',
' group: Add the user to the group' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 3:
Script.showHelp()
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
forceCreation = False
errorList = []
userProps = {}
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ( "f", "force" ):
forceCreation = True
elif unprocSw[0] in ( "p", "property" ):
prop = unprocSw[1]
pl = prop.split( "=" )
if len( pl ) < 2:
errorList.append( ( "in arguments", "Property %s has to include a '=' to separate name from value" % prop ) )
exitCode = 255
else:
pName = pl[0]
pValue = "=".join( pl[1:] )
print "Setting property %s to %s" % ( pName, pValue )
userProps[ pName ] = pValue
userName = args[0]
userProps[ 'DN' ] = args[1]
userProps[ 'Groups' ] = args[2:]
if not diracAdmin.csModifyUser( userName, userProps, createIfNonExistant = forceCreation ):
errorList.append( ( "modify user", "Cannot modify user %s" % userName ) )
exitCode = 255
else:
result = diracAdmin.csCommitChanges()
if not result[ 'OK' ]:
errorList.append( ( "commit", result[ 'Message' ] ) )
exitCode = 255
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
|
avedaee/DIRAC
|
Interfaces/scripts/dirac-admin-modify-user.py
|
Python
|
gpl-3.0
| 2,307
|
[
"DIRAC"
] |
b59f540ab94d6649e67b45708ed780109155792215ac43981bc7b47210192040
|
import types
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Security import Properties
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.Client.JobState.JobState import JobState
__RCSID__ = "$Id$"
class JobStateSyncHandler( RequestHandler ):
__jobStateMethods = []
@classmethod
def initializeHandler( cls, serviceInfoDict ):
cls.jobDB = JobDB()
result = cls.jobDB._getConnection()
if not result[ 'OK' ]:
cls.log.warn( "Could not connect to JobDB (%s). Resorting to RPC" % result[ 'Message' ] )
result[ 'Value' ].close()
#Try to do magic
myStuff = dir( cls )
jobStateStuff = dir( JobState )
for method in jobStateStuff:
if "export_%s" % method in myStuff:
cls.log.info( "Wrapping method %s. It's already defined in the Handler" % method )
# defMeth = getattr( cls, "export_%s" % method )
# setattr( cls, "_usr_def_%s" % method, defMeth )
# setattr( cls, "types_%s" % method, [ ( types.IntType, types.LongType ), types.TupleType ] )
# setattr( cls, "export_%s" % method, cls.__unwrapAndCall )
continue
elif 'right_%s' % method in jobStateStuff:
cls.log.info( "Mimicking method %s" % method )
setattr( cls, "auth_%s" % method, [ 'all' ] )
setattr( cls, "types_%s" % method, [ ( types.IntType, types.LongType ), types.TupleType ] )
setattr( cls, "export_%s" % method, cls.__mimeticFunction )
return S_OK()
def __unwrapArgs( self, margs ):
if len( margs ) < 1 or type( margs[0] ) != types.TupleType or ( len( margs ) > 1 and type( margs[1] ) != types.DictType ):
return S_ERROR( "Invalid arg stub. Expected tuple( args, kwargs? ), received %s" % str( margs ) )
if len( margs ) == 1:
return S_OK( ( margs[0], {} ) )
else:
return S_OK( ( margs[0], margs[1] ) )
def __mimeticFunction( self, jid, margs ):
method = self.srv_getActionTuple()[1]
result = self.__unwrapArgs( margs )
if not result[ 'OK' ]:
return result
args, kwargs = result[ 'Value' ]
if not self.__clientHasAccess( jid ):
return S_ERROR( "You're not authorized to access jid %s" % jid )
return getattr( self.__getJobState( jid ), method )( *args, **kwargs )
def __unwrapAndCall( self, jid, margs ):
method = self.srv_getActionTuple()[1]
result = self.__unwrapArgs( margs )
if not result[ 'OK' ]:
return result
args, kwargs = result[ 'Value' ]
if not self.__clientHasAccess( jid ):
return S_ERROR( "You're not authorized to access jid %s" % jid )
return getattr( self, "_usr_def_%s" % method )( jid, *args, **kwargs )
def __getJobState( self, jid ):
return JobState( jid, forceLocal = True )
def __clientHasAccess( self, jid ):
result = self.jobDB.getJobAttributes( jid, [ 'Owner', 'OwnerDN', 'OwnerGroup' ] )
if not result[ 'OK' ]:
return S_ERROR( "Cannot retrieve owner for jid %s" % jid )
ownerDict = result[ 'Value' ]
credDict = self.srv_getRemoteCredentials()
idString = "%s@%s (%s)" % ( credDict[ 'username' ], credDict[ 'group' ], credDict[ 'DN' ] )
if Properties.JOB_ADMINISTRATOR in credDict[ 'properties' ]:
self.log.verbose( "%s is job admin of jid %s" % ( idString, jid ) )
return True
if credDict[ 'username' ] == ownerDict[ 'Owner' ]:
self.log.verbose( "%s is owner of jid %s" % ( idString, jid ) )
return True
if Properties.JOB_SHARING in credDict[ 'properties' ] and \
credDict[ 'group' ] == ownerDict[ 'OwnerGroup' ]:
self.log.verbose( "%s is sharing group with jid %s" % ( idString, jid ) )
return True
self.log.verbose( "%s is NOT allowed to access jid %s" % ( idString, jid ) )
return False
#Manifests
auth_getManifest = "all"
types_getManifest = [ ( types.IntType, types.LongType ) ]
def export_getManifest( self, jid ):
return self.__getJobState( jid ).getManifest( rawData = True )
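# Hedged illustration of the pattern used in initializeHandler above (plain
# Python, not DIRAC API): export_* methods are synthesised with setattr from
# the right_* names found on another class.
#
#     class Backend(object):
#         def right_status(self, jid):
#             return "status of %s" % jid
#
#     class Handler(object):
#         pass
#
#     for m in dir(Backend):
#         if m.startswith("right_"):
#             setattr(Handler, "export_" + m[len("right_"):],
#                     lambda self, jid, _m=m: getattr(Backend(), _m)(jid))
#
#     Handler().export_status(42) # -> "status of 42"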
|
Sbalbp/DIRAC
|
WorkloadManagementSystem/Service/JobStateSyncHandler.py
|
Python
|
gpl-3.0
| 4,070
|
[
"DIRAC"
] |
ef7282ee679206537f03cedb9eb1c7223f92c22404fdef0c2143563a2906f383
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
# Credit to Dr. Shyue Ping Ong for the template of the calculator
"""
This module implements a TEM pattern calculator.
"""
import json
import os
from collections import namedtuple
from fractions import Fraction
from typing import List, Dict, Tuple, cast
from functools import lru_cache
import numpy as np
import scipy.constants as sc
import pandas as pd
import plotly.graph_objs as go
from pymatgen.core.structure import Structure
from pymatgen.analysis.diffraction.core import AbstractDiffractionPatternCalculator
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.string import unicodeify_spacegroup, latexify_spacegroup
with open(os.path.join(os.path.dirname(__file__),
"atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
__author__ = "Frank Wan, Jason Liang"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.22"
__maintainer__ = "Jason Liang"
__email__ = "fwan@berkeley.edu, yhljason@berkeley.edu"
__date__ = "03/31/2020"
class TEMCalculator(AbstractDiffractionPatternCalculator):
"""
Computes the TEM pattern of a crystal structure for multiple Laue zones.
Code partially inspired from XRD calculation implementation. X-ray factor to electron factor
conversion based on the International Table of Crystallography.
#TODO: Could add "number of iterations", "magnification", "critical value of beam",
"twin direction" for certain materials, "sample thickness", and "excitation error s"
"""
def __init__(self, symprec: float = None, voltage: float = 200,
beam_direction: Tuple[int, int, int] = (0, 0, 1), camera_length: int = 160,
debye_waller_factors: Dict[str, float] = None, cs: float = 1) -> None:
"""
Args:
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
voltage (float): The wavelength is a function of the TEM microscope's
voltage. By default, set to 200 kV. Units in kV.
beam_direction (tuple): The direction of the electron beam fired onto the sample.
By default, set to [0,0,1], which corresponds to the normal direction
of the sample plane.
camera_length (int): The distance from the sample to the projected diffraction pattern.
By default, set to 160 cm. Units in cm.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
cs (float): the chromatic aberration coefficient. set by default to 1 mm.
"""
self.symprec = symprec
self.voltage = voltage
self.beam_direction = beam_direction
self.camera_length = camera_length
self.debye_waller_factors = debye_waller_factors or {}
self.cs = cs
@lru_cache(1)
def wavelength_rel(self) -> float:
"""
Calculates the wavelength of the electron beam with relativistic kinematic effects taken
into account.
Args:
none
Returns:
Relativistic Wavelength (in angstroms)
"""
wavelength_rel = sc.h / np.sqrt(2 * sc.m_e * sc.e * 1000 * self.voltage *
(1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c ** 2))) * (10 ** 10)
return wavelength_rel
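    # Hedged check (standard physics, not from this file): at the default
    # 200 kV the relativistic electron wavelength is ~0.0251 angstroms.
    #
    #     TEMCalculator().wavelength_rel() # ~= 0.0251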
def generate_points(self, coord_left: int = -10, coord_right: int = 10) -> np.ndarray:
"""
Generates a bunch of 3D points that span a cube.
Args:
coord_left (int): The minimum coordinate value.
coord_right (int): The maximum coordinate value.
Returns:
Numpy 2d array
"""
points = [0, 0, 0]
coord_values = np.arange(coord_left, coord_right + 1)
points[0], points[1], points[2] = np.meshgrid(coord_values, coord_values, coord_values)
points_matrix = (np.ravel(points[i]) for i in range(0, 3))
result = np.vstack(list(points_matrix)).transpose()
return result
def zone_axis_filter(self, points: np.ndarray, laue_zone: int = 0) -> List[Tuple[int, int, int]]:
"""
Filters out all points that exist within the specified Laue zone according to the zone axis rule.
Args:
points (np.ndarray): The list of points to be filtered.
laue_zone (int): The desired Laue zone.
Returns:
list of 3-tuples
"""
if any(isinstance(n, tuple) for n in points):
return points
if len(points) == 0:
return []
filtered = np.where(np.dot(np.array(self.beam_direction), np.transpose(points)) == laue_zone)
result = points[filtered]
result_tuples = cast(List[Tuple[int, int, int]], [tuple(x) for x in result.tolist()])
return result_tuples
def get_interplanar_spacings(self, structure: Structure, points: List[Tuple[int, int, int]]) \
-> Dict[Tuple[int, int, int], float]:
"""
Args:
structure (Structure): the input structure.
points (tuple): the desired hkl indices.
Returns:
Dict of hkl to its interplanar spacing, in angstroms (float).
"""
points_filtered = self.zone_axis_filter(points)
if (0, 0, 0) in points_filtered:
points_filtered.remove((0, 0, 0))
interplanar_spacings_val = np.array(list(map(lambda x: structure.lattice.d_hkl(x), points_filtered)))
interplanar_spacings = dict(zip(points_filtered, interplanar_spacings_val))
return interplanar_spacings
def bragg_angles(self, interplanar_spacings: Dict[Tuple[int, int, int], float]) \
-> Dict[Tuple[int, int, int], float]:
"""
Gets the Bragg angles for every hkl point passed in (where n = 1).
Args:
interplanar_spacings (dict): dictionary of hkl to interplanar spacing
Returns:
dict of hkl plane (3-tuple) to Bragg angle in radians (float)
"""
plane = list(interplanar_spacings.keys())
interplanar_spacings_val = np.array(list(interplanar_spacings.values()))
bragg_angles_val = np.arcsin(self.wavelength_rel() / (2 * interplanar_spacings_val))
bragg_angles = dict(zip(plane, bragg_angles_val))
return bragg_angles
def get_s2(self, bragg_angles: Dict[Tuple[int, int, int], float]) \
-> Dict[Tuple[int, int, int], float]:
"""
Calculates the s squared parameter (= square of sin theta over lambda) for each hkl plane.
Args:
bragg_angles (Dict): The bragg angles for each hkl plane.
Returns:
Dict of hkl plane to s2 parameter, calculates the s squared parameter
(= square of sin theta over lambda).
"""
plane = list(bragg_angles.keys())
bragg_angles_val = np.array(list(bragg_angles.values()))
s2_val = (np.sin(bragg_angles_val) / self.wavelength_rel()) ** 2
s2 = dict(zip(plane, s2_val))
return s2
def x_ray_factors(self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]) \
-> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates x-ray factors, which are required to calculate atomic scattering factors. Method partially inspired
by the equivalent process in the xrd module.
Args:
structure (Structure): The input structure.
bragg_angles (Dict): Dictionary of hkl plane to Bragg angle.
Returns:
dict of atomic symbol to another dict of hkl plane to x-ray factor (in angstroms).
"""
x_ray_factors = {}
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
scattering_factors_for_atom = {}
for atom in atoms:
coeffs = np.array(ATOMIC_SCATTERING_PARAMS[atom.symbol])
for plane in bragg_angles:
scattering_factor_curr = atom.Z - 41.78214 * s2[plane] * np.sum(coeffs[:, 0]
* np.exp(-coeffs[:, 1] * s2[plane]),
axis=None)
scattering_factors_for_atom[plane] = scattering_factor_curr
x_ray_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return x_ray_factors
def electron_scattering_factors(self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]) \
-> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates atomic scattering factors for electrons using the Mott-Bethe formula (1st order Born approximation).
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict from atomic symbol to another dict of hkl plane to factor (in angstroms)
"""
electron_scattering_factors = {}
x_ray_factors = self.x_ray_factors(structure, bragg_angles)
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
prefactor = 0.023934
scattering_factors_for_atom = {}
for atom in atoms:
for plane in bragg_angles:
scattering_factor_curr = prefactor * (atom.Z - x_ray_factors[atom.symbol][plane]) / s2[plane]
scattering_factors_for_atom[plane] = scattering_factor_curr
electron_scattering_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return electron_scattering_factors
def cell_scattering_factors(self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]) \
            -> Dict[Tuple[int, int, int], complex]:
"""
Calculates the scattering factor for the whole cell.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane (3-tuple) to scattering factor (in angstroms).
"""
cell_scattering_factors = {}
electron_scattering_factors = self.electron_scattering_factors(structure, bragg_angles)
scattering_factor_curr = 0
for plane in bragg_angles:
for site in structure:
for sp, occu in site.species.items():
g_dot_r = np.dot(np.array(plane), np.transpose(site.frac_coords))
scattering_factor_curr += electron_scattering_factors[sp.symbol][plane] * np.exp(
2j * np.pi * g_dot_r)
cell_scattering_factors[plane] = scattering_factor_curr
scattering_factor_curr = 0
return cell_scattering_factors
def cell_intensity(self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]) \
-> Dict[Tuple[int, int, int], float]:
"""
Calculates cell intensity for each hkl plane. For simplicity's sake, take I = |F|**2.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to cell intensity
"""
csf = self.cell_scattering_factors(structure, bragg_angles)
plane = bragg_angles.keys()
csf_val = np.array(list(csf.values()))
cell_intensity_val = (csf_val * csf_val.conjugate()).real
cell_intensity = dict(zip(plane, cell_intensity_val))
return cell_intensity
def get_pattern(self, structure: Structure, scaled: bool = None, two_theta_range: Tuple[float, float] = None) \
-> pd.DataFrame:
"""
Returns all relevant TEM DP info in a pandas dataframe.
Args:
structure (Structure): The input structure.
scaled (boolean): Required value for inheritance, does nothing in TEM pattern
two_theta_range (Tuple): Required value for inheritance, does nothing in TEM pattern
Returns:
PandasDataFrame
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
field_names = ["Position", "(hkl)", "Intensity (norm)", "Film radius", "Interplanar Spacing"]
rows_list = []
for dot in tem_dots:
            dict1 = {'Position': dot.position, '(hkl)': dot.hkl, 'Intensity (norm)': dot.intensity,
                     'Film radius': dot.film_radius, 'Interplanar Spacing': dot.d_spacing}
rows_list.append(dict1)
df = pd.DataFrame(rows_list, columns=field_names)
return df
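    # Hedged usage sketch (illustrative structure; fcc Cu, a = 3.615 A):
    #
    #     from pymatgen.core.lattice import Lattice
    #     s = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.615), ["Cu"], [[0, 0, 0]])
    #     df = TEMCalculator().get_pattern(s)
    #     print(df.head())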
def normalized_cell_intensity(self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]) \
-> Dict[Tuple[int, int, int], float]:
"""
Normalizes the cell_intensity dict to 1, for use in plotting.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to normalized cell intensity
"""
normalized_cell_intensity = {}
cell_intensity = self.cell_intensity(structure, bragg_angles)
max_intensity = max([v for v in cell_intensity.values()])
norm_factor = 1 / max_intensity
for plane in cell_intensity:
normalized_cell_intensity[plane] = cell_intensity[plane] * norm_factor
return normalized_cell_intensity
def is_parallel(self, structure: Structure, plane: Tuple[int, int, int], other_plane: Tuple[int, int, int]) \
-> bool:
"""
Checks if two hkl planes are parallel in reciprocal space.
Args:
structure (Structure): The input structure.
plane (3-tuple): The first plane to be compared.
other_plane (3-tuple): The other plane to be compared.
Returns:
boolean
"""
phi = self.get_interplanar_angle(structure, plane, other_plane)
return phi in (180, 0) or np.isnan(phi)
def get_first_point(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], float]:
"""
Gets the first point to be plotted in the 2D DP, corresponding to maximum d/minimum R.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of a hkl plane to max interplanar distance.
"""
max_d = -100.0
max_d_plane = (0, 0, 1)
points = self.zone_axis_filter(points)
spacings = self.get_interplanar_spacings(structure, points)
for plane in sorted(spacings.keys()):
if spacings[plane] > max_d:
max_d_plane = plane
max_d = spacings[plane]
return {max_d_plane: max_d}
def get_interplanar_angle(self, structure: Structure, p1: Tuple[int, int, int], p2: Tuple[int, int, int]) \
-> float:
"""
Returns the interplanar angle (in degrees) between the normal of two crystal planes.
Formulas from International Tables for Crystallography Volume C pp. 2-9.
Args:
structure (Structure): The input structure.
p1 (3-tuple): plane 1
p2 (3-tuple): plane 2
Returns:
float
"""
a, b, c = structure.lattice.a, structure.lattice.b, structure.lattice.c
alpha, beta, gamma = np.deg2rad(structure.lattice.alpha), np.deg2rad(structure.lattice.beta), \
np.deg2rad(structure.lattice.gamma)
v = structure.lattice.volume
a_star = b * c * np.sin(alpha) / v
b_star = a * c * np.sin(beta) / v
c_star = a * b * np.sin(gamma) / v
cos_alpha_star = (np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
cos_beta_star = (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
cos_gamma_star = (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
        r1_norm = np.sqrt(
            p1[0] ** 2 * a_star ** 2 + p1[1] ** 2 * b_star ** 2 + p1[2] ** 2 * c_star ** 2 + 2 * p1[0] * p1[1]
            * a_star * b_star * cos_gamma_star + 2 * p1[0] * p1[2] * a_star * c_star
            * cos_beta_star + 2 * p1[1] * p1[2] * b_star * c_star * cos_alpha_star
        )
        r2_norm = np.sqrt(
            p2[0] ** 2 * a_star ** 2 + p2[1] ** 2 * b_star ** 2 + p2[2] ** 2 * c_star ** 2 + 2 * p2[0] * p2[1]
            * a_star * b_star * cos_gamma_star + 2 * p2[0] * p2[2] * a_star * c_star
            * cos_beta_star + 2 * p2[1] * p2[2] * b_star * c_star * cos_alpha_star
        )
        r1_dot_r2 = p1[0] * p2[0] * a_star ** 2 + p1[1] * p2[1] * b_star ** 2 + p1[2] * p2[2] * c_star ** 2 \
            + (p1[0] * p2[1] + p2[0] * p1[1]) * a_star * b_star * cos_gamma_star \
            + (p1[0] * p2[2] + p2[0] * p1[2]) * a_star * c_star * cos_beta_star \
            + (p1[1] * p2[2] + p2[1] * p1[2]) * b_star * c_star * cos_alpha_star
phi = np.arccos(r1_dot_r2 / (r1_norm * r2_norm))
return np.rad2deg(phi)
def get_plot_coeffs(self, p1: Tuple[int, int, int], p2: Tuple[int, int, int], p3: Tuple[int, int, int]) \
-> np.ndarray:
"""
Calculates coefficients of the vector addition required to generate positions for each DP point
by the Moore-Penrose inverse method.
Args:
p1 (3-tuple): The first point. Fixed.
p2 (3-tuple): The second point. Fixed.
            p3 (3-tuple): The point whose coefficients are to be calculated.
Returns:
Numpy array
"""
a = np.array([[p1[0], p2[0]],
[p1[1], p2[1]],
[p1[2], p2[2]]])
b = np.array([[p3[0], p3[1], p3[2]]]).T
a_pinv = np.linalg.pinv(a)
x = np.dot(a_pinv, b)
return np.ravel(x)
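    # Hedged example for get_plot_coeffs: if p3 = p1 + p2, the least-squares
    # coefficients returned by the pseudoinverse are [1, 1].
    #
    #     TEMCalculator().get_plot_coeffs((1, 0, 0), (0, 1, 0), (1, 1, 0))
    #     # -> array([1., 1.])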
def get_positions(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], list]:
"""
Calculates all the positions of each hkl point in the 2D diffraction pattern by vector addition.
Distance in centimeters.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of hkl plane to xy-coordinates.
"""
positions = {}
points = self.zone_axis_filter(points)
# first is the max_d, min_r
first_point_dict = self.get_first_point(structure, points)
for point in first_point_dict:
first_point = point
first_d = first_point_dict[point]
spacings = self.get_interplanar_spacings(structure, points)
# second is the first non-parallel-to-first-point vector when sorted.
# note 000 is "parallel" to every plane vector.
for plane in sorted(spacings.keys()):
second_point, second_d = plane, spacings[plane]
if not self.is_parallel(structure, first_point, second_point):
break
p1 = first_point
p2 = second_point
if (0, 0, 0) in points:
points.remove((0, 0, 0))
points.remove(first_point)
points.remove(second_point)
positions[(0, 0, 0)] = np.array([0, 0])
r1 = self.wavelength_rel() * self.camera_length / first_d
positions[first_point] = np.array([r1, 0])
r2 = self.wavelength_rel() * self.camera_length / second_d
phi = np.deg2rad(self.get_interplanar_angle(structure, first_point, second_point))
positions[second_point] = np.array([r2 * np.cos(phi), r2 * np.sin(phi)])
for plane in points:
coeffs = self.get_plot_coeffs(p1, p2, plane)
pos = np.array([coeffs[0] * positions[first_point][0] + coeffs[1] * positions[second_point][0],
coeffs[0] * positions[first_point][1] + coeffs[1] * positions[second_point][1]])
positions[plane] = pos
points.append((0, 0, 0))
points.append(first_point)
points.append(second_point)
return positions
def tem_dots(self, structure: Structure, points: list) -> list:
"""
Generates all TEM_dot as named tuples that will appear on the 2D diffraction pattern.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
list of TEM_dots
"""
dots = []
interplanar_spacings = self.get_interplanar_spacings(structure, points)
bragg_angles = self.bragg_angles(interplanar_spacings)
cell_intensity = self.normalized_cell_intensity(structure, bragg_angles)
positions = self.get_positions(structure, points)
for plane in cell_intensity.keys():
dot = namedtuple('TEM_dot', ['position', 'hkl', 'intensity', 'film_radius', 'd_spacing'])
position = positions[plane]
hkl = plane
intensity = cell_intensity[plane]
film_radius = 0.91 * (10 ** -3 * self.cs * self.wavelength_rel() ** 3) ** Fraction('1/4')
d_spacing = interplanar_spacings[plane]
tem_dot = dot(position, hkl, intensity, film_radius, d_spacing)
dots.append(tem_dot)
return dots
def get_plot_2d(self, structure: Structure) -> go.Figure:
"""
Generates the 2D diffraction pattern of the input structure.
Args:
structure (Structure): The input structure.
Returns:
Figure
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
xs = []
ys = []
hkls = []
intensities = []
for dot in tem_dots:
xs.append(dot.position[0])
ys.append(dot.position[1])
hkls.append(str(dot.hkl))
intensities.append(dot.intensity)
hkls = list(map(unicodeify_spacegroup, list(map(latexify_spacegroup, hkls))))
data = [
go.Scatter(
x=xs,
y=ys,
text=hkls,
hoverinfo='text',
mode='markers',
marker=dict(
size=8,
cmax=1,
cmin=0,
color=intensities,
colorscale=[[0, 'black'], [1.0, 'white']]
),
showlegend=False,
), go.Scatter(
x=[0],
y=[0],
text="(0, 0, 0): Direct beam",
hoverinfo='text',
mode='markers',
marker=dict(
size=14,
cmax=1,
cmin=0,
color='white'
),
showlegend=False,
)
]
layout = go.Layout(
title='2D Diffraction Pattern<br>Beam Direction: ' + ''.join(str(e) for e in self.beam_direction),
font=dict(
size=14,
color='#7f7f7f'),
hovermode='closest',
xaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False
),
yaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False,
),
width=550,
height=550,
paper_bgcolor='rgba(100,110,110,0.5)',
plot_bgcolor='black',
)
fig = go.Figure(data=data, layout=layout)
return fig
def get_plot_2d_concise(self, structure: Structure) -> go.Figure:
"""
Generates the concise 2D diffraction pattern of the input structure of a smaller size and without layout.
Does not display.
Args:
structure (Structure): The input structure.
Returns:
Figure
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
xs = []
ys = []
hkls = []
intensities = []
for dot in tem_dots:
if dot.hkl != (0, 0, 0):
xs.append(dot.position[0])
ys.append(dot.position[1])
hkls.append(dot.hkl)
intensities.append(dot.intensity)
data = [
go.Scatter(
x=xs,
y=ys,
text=hkls,
mode='markers',
hoverinfo='skip',
marker=dict(
size=4,
cmax=1,
cmin=0,
color=intensities,
colorscale=[[0, 'black'], [1.0, 'white']]
),
showlegend=False
)
]
layout = go.Layout(
xaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False
),
yaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False,
),
plot_bgcolor='black',
margin={'l': 0, 'r': 0, 't': 0, 'b': 0},
width=121,
height=121,
)
fig = go.Figure(data=data, layout=layout)
fig.layout.update(showlegend=False)
return fig
|
gVallverdu/pymatgen
|
pymatgen/analysis/diffraction/tem.py
|
Python
|
mit
| 26,839
|
[
"CRYSTAL",
"pymatgen"
] |
2de184d0b45b9ec030dfeb1ceba3638dc0b6d25393cc0e4901baedb61241f236
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
## $QT_END_LICENSE$
##
#############################################################################
import os, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def createGroupBox(parent, attributes = None, fill = False, fake = False):
background = CustomWidget(parent, fake)
backgroundLayout = QVBoxLayout()
backgroundLayout.setMargin(4)
background.setLayout(backgroundLayout)
groupBox = QGroupBox("&Options")
layout = QGridLayout()
groupBox.setLayout(layout)
layout.addWidget(QCheckBox("C&ase sensitive"), 0, 0)
layout.addWidget(QCheckBox("W&hole words"), 0, 1)
checkedBox = QCheckBox("Search &forwards")
checkedBox.setChecked(True)
layout.addWidget(checkedBox, 1, 0)
layout.addWidget(QCheckBox("From &start of text"), 1, 1)
backgroundLayout.addWidget(groupBox)
if attributes:
for attr in attributes:
groupBox.setAttribute(attr, True)
if not fake:
background.setAttribute(attr, True)
groupBox.setAutoFillBackground(fill)
background.setAutoFillBackground(fill)
return background
class CustomWidget(QWidget):
def __init__(self, parent, fake = False):
QWidget.__init__(self, parent)
self.fake = fake
self.fakeBrush = QBrush(Qt.red, Qt.DiagCrossPattern)
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
if self.fake:
painter.fillRect(event.rect(), QBrush(Qt.white))
painter.fillRect(event.rect(), self.fakeBrush)
painter.end()
if __name__ == "__main__":
try:
qt = sys.argv[1]
except IndexError:
qt = "4.1"
if qt != "4.0" and qt != "4.1":
sys.stderr.write("Usage: %s [4.0|4.1]\n" % sys.argv[0])
sys.exit(1)
app = QApplication(sys.argv)
exec_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
label = QLabel()
label.setPixmap(QPixmap(os.path.join(exec_dir, "lightbackground.png")))
layout = QGridLayout()
label.setLayout(layout)
if qt == "4.0":
layout.addWidget(createGroupBox(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Opaque (Default)", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
layout.addWidget(createGroupBox(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Contents Propagated (Default)", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
contentsWidget = createGroupBox(label)
contentsWidget.setAttribute(Qt.WA_ContentsPropagated, True)
layout.addWidget(contentsWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With WA_ContentsPropagated set", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
autoFillWidget = createGroupBox(label, fill = True)
layout.addWidget(autoFillWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With autoFillBackground set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
# if qt == "4.0":
# noBackgroundWidget = createGroupBox(
# label, attributes = [Qt.WA_NoBackground], fake = True)
# layout.addWidget(noBackgroundWidget, 2, 0, Qt.AlignCenter)
# caption = QLabel("With WA_NoBackground set", label)
# caption.setWordWrap(True)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 0, Qt.AlignCenter | Qt.AlignTop)
# elif qt == "4.1":
# opaqueWidget = createGroupBox(
# label, attributes = [Qt.WA_OpaquePaintEvent], fake = True)
# layout.addWidget(opaqueWidget, 2, 0, Qt.AlignCenter)
# caption = QLabel("With WA_OpaquePaintEvent set", label)
# caption.setAutoFillBackground(True)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 0, Qt.AlignCenter | Qt.AlignTop)
#
# if qt == "4.0":
# contentsNoBackgroundWidget = createGroupBox(
# label, attributes = [Qt.WA_ContentsPropagated, Qt.WA_NoBackground],
# fake = True)
# layout.addWidget(contentsNoBackgroundWidget, 2, 1, Qt.AlignCenter)
# caption = QLabel("With WA_ContentsPropagated and WA_NoBackground set", label)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 1, Qt.AlignCenter | Qt.AlignTop)
# elif qt == "4.1":
# opaqueAutoFillWidget = createGroupBox(
# label, attributes = [Qt.WA_OpaquePaintEvent], fill = True, fake = True)
# layout.addWidget(opaqueAutoFillWidget, 2, 1, Qt.AlignCenter)
# caption = QLabel("With WA_OpaquePaintEvent and autoFillBackground set", label)
# caption.setWordWrap(True)
# caption.setAutoFillBackground(True)
# caption.setMargin(2)
# layout.addWidget(caption, 3, 1, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
label.setWindowTitle("Qt 4.0: Painting Standard Qt Widgets")
elif qt == "4.1":
label.setWindowTitle("Qt 4.1: Painting Standard Qt Widgets")
label.resize(480, 140)
label.show()
sys.exit(app.exec_())
|
librelab/qtmoko-test
|
qtopiacore/qt/doc/src/diagrams/contentspropagation/standardwidgets.py
|
Python
|
gpl-2.0
| 7,290
|
[
"ASE"
] |
75a6bef2e46dc405d0feedada2de935002d0ff9cd7ca7a37ff9a720a2f8cebed
|
from django.contrib.staticfiles.testing import LiveServerTestCase
from splinter import Browser
from time import sleep
from .factories import (
UserFactory, ClientFactory, CompanyFactory, CategoryFactory,
ProductFactory, QuoteFactory, QuoteModsFactory)
class LiveServerSplinterAuthTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
super(LiveServerSplinterAuthTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(LiveServerSplinterAuthTest, cls).tearDownClass()
def setUp(self):
self.user1 = UserFactory()
self.user1.set_password('secret')
self.user1.save()
self.client1 = ClientFactory()
self.category1 = CategoryFactory(name='Chairs')
self.category2 = CategoryFactory(name='Tables')
self.product1 = ProductFactory(category=self.category1)
self.product2 = ProductFactory(category=self.category1)
self.product3 = ProductFactory(category=self.category2)
self.browser = Browser()
self.login_helper(self.user1.username, 'secret')
def tearDown(self):
self.browser.quit()
def login_helper(self, username, password):
self.browser.visit('{}{}'.format(
self.live_server_url, '/accounts/login/')
)
self.browser.fill('username', username)
self.browser.fill('password', password)
self.browser.find_by_value('Log in').first.click()
def test_redirected_to_menu_after_login(self):
self.assertTrue(self.browser.is_text_present('Select An Option'))
def test_new_quote_button(self):
self.browser.visit('{}{}'.format(
self.live_server_url, '/menu')
)
new_quote_visible = self.browser.find_by_id('new_quote').visible
self.assertFalse(new_quote_visible)
self.browser.find_by_id('btn_new_quote').click()
self.assertTrue(self.browser.is_text_present('New Quote'))
sleep(1)
new_quote_visible = self.browser.find_by_id('new_quote').visible
self.assertTrue(new_quote_visible)
|
Estmator/EstmatorApp
|
estmator_project/estmator_project/test_functional.py
|
Python
|
mit
| 2,091
|
[
"VisIt"
] |
b3782699e764cf91aeeff4c01b5b3e6835076d402c8a809291bcdb520032545c
|
#
# @file TestReadFromFile8.py
# @brief Reads test-data/l2v4-new.xml into memory and tests it.
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestReadFromFile8.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestReadFromFile8(unittest.TestCase):
def test_read_l2v4_new(self):
reader = libsbml.SBMLReader()
filename = "../../sbml/test/test-data/"
filename += "l2v4-new.xml"
d = reader.readSBML(filename)
if (d == None):
pass
self.assert_( d.getLevel() == 2 )
self.assert_( d.getVersion() == 4 )
m = d.getModel()
self.assert_( m != None )
self.assert_( m.getId() == "l2v4_all" )
self.assert_( m.getNumCompartments() == 1 )
c = m.getCompartment(0)
self.assert_( c != None )
self.assert_( c.getId() == "a" )
self.assert_( c.getSize() == 1 )
self.assertEqual( False, c.getConstant() )
self.assert_( m.getNumEvents() == 1 )
e = m.getEvent(0)
self.assert_( e != None )
self.assertEqual( True, e.getUseValuesFromTriggerTime() )
self.assertEqual( True, e.isSetTrigger() )
trigger = e.getTrigger()
self.assert_( trigger != None )
ast = trigger.getMath()
self.assert_(( "lt(x, 3)" == libsbml.formulaToString(ast) ))
self.assert_( e.getNumEventAssignments() == 1 )
ea = e.getEventAssignment(0)
self.assert_( ea != None )
self.assert_( ea.getVariable() == "a" )
ast = ea.getMath()
self.assert_(( "x * p3" == libsbml.formulaToString(ast) ))
d = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestReadFromFile8))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestReadFromFile8.py
|
Python
|
bsd-3-clause
| 3,007
|
[
"VisIt"
] |
80670763bc7f8ceffafd2e3c11054b1ab5373536e767e971327f7fb7425ca3a1
|
# -*- coding: utf-8 -*-
import json
import random
import requests
import string
from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple, Union
from collections import defaultdict
from django.db import connection
from django.conf import settings
from django.http import HttpRequest, JsonResponse
from catmaid.fields import Double3D
from catmaid.models import Log, NeuronSearch, CELL_BODY_CHOICES, \
SORT_ORDERS_DICT, Relation, Class, ClassInstance, \
ClassInstanceClassInstance
class ConfigurationError(Exception):
"""Indicates some sort of configuration error"""
def __init__(self, message):
super().__init__(message)
def identity(x:Any) -> Any:
"""Simple identity."""
return x
def is_empty(iterable):
    """ Test if the passed in iterable is empty.
    """
    for _ in iterable:
        return False
    return True
def get_catmaid_version(request:HttpRequest) -> JsonResponse:
return JsonResponse({'SERVER_VERSION': settings.VERSION})
class parsedict(dict):
"""This is a simple wrapper, needed primarily by the request list
parser.
"""
pass
def get_request_bool(request_dict:Dict, name:Optional[str], default:Optional[bool]=None) -> Optional[bool]:
"""Extract a boolean value for the passed in parameter name in the passed
in dictionary. The boolean paramter is expected to be a string and True is
returned if it matches the string "true" (case-insensitive), False otherwise.
If the name may not be present in the dictionary, caller must provide a default
value or the return value will be None rather than boolean.
"""
value = request_dict.get(name)
return default if value is None else value.lower() == 'true'
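# Hedged examples for get_request_bool (illustrative dictionaries):
#
#     get_request_bool({'with_tags': 'True'}, 'with_tags') # -> True
#     get_request_bool({}, 'with_tags', False)             # -> False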
def get_request_list(request_dict:Dict, name, default=None, map_fn=identity) -> Optional[List]:
"""Look for a list in a request dictionary where individual items are named
with or without an index. Traditionally, the CATMAID web front-end sends
the list a = [1,2,3] encoded as fields a[0]=1, a[1]=2 and a[2]=3. Using
other APIs, like jQuery's $.ajax, will encode the same list as a=1, a=2,
a=3. This method helps to parse both transparently.
"""
def flatten(d, max_index:int) -> List[List]:
"""Flatten a dict of dicts into lists of lists. Expect all keys to be
integers.
Providing a proper type for "d" here in a way that mypy is happy is nontrivial.
"""
k = []
for i in range(max_index):
v = d.get(i)
if not v and v != 0:
continue
if parsedict == type(v):
k.append(flatten(v, max_index))
else:
k.append(v)
return k
def add_items(items, name) -> List[List]:
d = parsedict()
max_index = -1
testname = name + '['
namelen = len(testname)
for k,v in items:
if k.startswith(testname):
# name[0][0] -> 0][0
index_part = k[namelen:len(k)-1]
# If there is no index part, the key format is "name[]=a,b,c"
# for each entry.
if len(index_part) == 0:
for single_value in v.split(','):
max_index += 1
d[max_index] = map_fn(single_value)
else:
indices = index_part.split('][')
target = d
# Fill in all but last index
for i in indices[:-1]:
key = int(i)
new_target = target.get(key)
if (key > max_index):
max_index = key
if not new_target:
new_target = parsedict()
target[key] = new_target
target = new_target
last_index = int(indices[-1])
target[last_index] = map_fn(v)
if (last_index > max_index):
max_index = last_index
return flatten(d, max_index + 1)
items = add_items(request_dict.items(), name)
if items:
return items
if hasattr(request_dict, 'getlist'):
items = [map_fn(v) for v in request_dict.getlist(name, [])] # type: ignore
if items:
return items
return default
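# Hedged examples for get_request_list, covering both encodings it accepts
# (illustrative dictionaries):
#
#     get_request_list({'a[0]': '1', 'a[1]': '2'}, 'a', map_fn=int) # -> [1, 2]
#     get_request_list({'a[]': '1,2'}, 'a', map_fn=int)             # -> [1, 2]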
def _create_relation(user, project_id:Union[int,str], relation_id, instance_a_id, instance_b_id) -> ClassInstanceClassInstance:
relation = ClassInstanceClassInstance()
relation.user = user
relation.project_id = project_id
relation.relation_id = relation_id
relation.class_instance_a_id = instance_a_id
relation.class_instance_b_id = instance_b_id
relation.save()
return relation
def insert_into_log(project_id:Union[int, str], user_id, op_type:str, location=None, freetext=None) -> Optional[Dict[str, str]]:
""" Inserts a new entry into the log table. If the location parameter is
passed, it is expected to be an iteratable (list, tuple).
"""
# valid operation types
operation_type_array = [
"rename_root",
"create_neuron",
"rename_neuron",
"remove_neuron",
"move_neuron",
"create_group",
"rename_group",
"remove_group",
"move_group",
"create_skeleton",
"rename_skeleton",
"remove_skeleton",
"move_skeleton",
"split_skeleton",
"join_skeleton",
"reroot_skeleton",
"change_confidence",
"reset_reviews"
]
if op_type not in operation_type_array:
raise ValueError(f'Operation type {op_type} not valid')
new_log = Log()
new_log.user_id = user_id
new_log.project_id = project_id
new_log.operation_type = op_type
if location is not None:
new_log.location = Double3D(*location)
if freetext is not None:
new_log.freetext = freetext
new_log.save()
return None
def order_neurons(neurons:List, order_by=None):
column, reverse = 'name', False
if order_by and (order_by in SORT_ORDERS_DICT):
column, reverse, _ = SORT_ORDERS_DICT[order_by]
if column == 'name':
neurons.sort(key=lambda x: x.name)
elif column == 'gal4':
neurons.sort(key=lambda x: x.cached_sorted_lines_str)
elif column == 'cell_body':
neurons.sort(key=lambda x: x.cached_cell_body)
else:
raise Exception("Unknown column (%s) in order_neurons" % (column,))
if reverse:
neurons.reverse()
return neurons
# Both index and visual_index take a request and kwargs and then
# return a list of neurons and a NeuronSearch form:
def get_form_and_neurons(request:HttpRequest, project_id:Union[int,str], kwargs) -> Tuple[List, NeuronSearch]:
# If we've been passed parameters in a REST-style GET request,
# create a form from them. Otherwise, if it's a POST request,
# create the form from the POST parameters. Otherwise, it's a
# plain request, so create the default search form.
rest_keys = ('search', 'cell_body_location', 'order_by')
if any((x in kwargs) for x in rest_keys):
kw_search = kwargs.get('search', None) or ""
kw_cell_body_choice = kwargs.get('cell_body_location', None) or "a"
kw_order_by = kwargs.get('order_by', None) or 'name'
search_form = NeuronSearch({'search': kw_search,
'cell_body_location': kw_cell_body_choice,
'order_by': kw_order_by})
elif request.method == 'POST':
search_form = NeuronSearch(request.POST)
else:
search_form = NeuronSearch({'search': '',
'cell_body_location': 'a',
'order_by': 'name'})
if search_form.is_valid():
search = search_form.cleaned_data['search']
cell_body_location = search_form.cleaned_data['cell_body_location']
order_by = search_form.cleaned_data['order_by']
else:
search = ''
cell_body_location = 'a'
order_by = 'name'
cell_body_choices_dict = dict(CELL_BODY_CHOICES)
all_neurons = ClassInstance.objects.filter(
project__id=project_id,
class_column__class_name='neuron',
name__icontains=search).exclude(name='orphaned pre').exclude(name='orphaned post')
if cell_body_location != 'a':
location = cell_body_choices_dict[cell_body_location]
all_neurons = all_neurons.filter(
project__id=project_id,
cici_via_a__relation__relation_name='has_cell_body',
cici_via_a__class_instance_b__name=location)
cici_qs = ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='has_cell_body',
class_instance_a__class_column__class_name='neuron',
class_instance_b__class_column__class_name='cell_body_location')
neuron_id_to_cell_body_location = dict(
(x.class_instance_a.id, x.class_instance_b.name) for x in cici_qs)
neuron_id_to_driver_lines:DefaultDict[Any, List] = defaultdict(list)
for cici in ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='expresses_in',
class_instance_a__class_column__class_name='driver_line',
class_instance_b__class_column__class_name='neuron',
):
neuron_id_to_driver_lines[cici.class_instance_b.id].append(cici.class_instance_a)
all_neurons = list(all_neurons)
for n in all_neurons:
n.cached_sorted_lines = sorted(
neuron_id_to_driver_lines[n.id], key=lambda x: x.name)
n.cached_sorted_lines_str = ", ".join(x.name for x in n.cached_sorted_lines)
n.cached_cell_body = neuron_id_to_cell_body_location.get(n.id, 'Unknown')
all_neurons = order_neurons(all_neurons, order_by)
return (all_neurons, search_form)
# TODO After all PHP functions have been replaced and all occurrence of
# this odd behavior have been found, change callers to not depend on this
# legacy functionality.
def makeJSON_legacy_list(objects) -> Dict:
'''
The PHP function makeJSON, when operating on a list of rows as
results, will output a JSON list of key-values, with keys being
integers from 0 and upwards. We return a dict with the same
structure so that it looks the same when used with json.dumps.
'''
i = 0
res = {}
for o in objects:
res[i] = o
i += 1
return res
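# For example, makeJSON_legacy_list(['a', 'b']) returns {0: 'a', 1: 'b'},
# which json.dumps() renders as '{"0": "a", "1": "b"}'.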
def cursor_fetch_dictionary(cursor) -> List[Dict]:
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def get_relation_to_id_map(project_id:Union[int,str], name_constraints=None, cursor=None) -> Dict:
"""
Return a mapping of relation names to relation IDs. If a list of names is
provided, only relations with those names will be included. If a cursor is
provided, this cursor will be used.
"""
if cursor:
sqlquery = "SELECT relation_name, id FROM relation WHERE project_id = %s"
params = [int(project_id)]
if name_constraints:
sqlquery += " AND (%s)" % ' OR '.join(('relation_name = %s',) * len(name_constraints))
            params += name_constraints
cursor.execute(sqlquery, params)
return dict(cursor.fetchall())
else:
query = Relation.objects.filter(project=project_id)
if name_constraints:
query = query.filter(relation_name__in=name_constraints)
return {rname: ID for rname, ID in query.values_list("relation_name", "id")}
def get_class_to_id_map(project_id:Union[int,str], name_constraints=None, cursor=None) -> Dict:
"""
    Return a mapping of class names to class IDs. If a list of names is
provided, only classes with those names will be included. If a cursor is
provided, this cursor will be used.
"""
if cursor:
sqlquery = "SELECT class_name, id FROM class WHERE project_id = %s"
params = [int(project_id)]
if name_constraints:
sqlquery += " AND (%s)" % ' OR '.join(('class_name = %s',) * len(name_constraints))
            params += name_constraints
cursor.execute(sqlquery, params)
return dict(cursor.fetchall())
else:
query = Class.objects.filter(project=project_id)
if name_constraints:
query = query.filter(class_name__in=name_constraints)
return {cname: ID for cname, ID in query.values_list("class_name", "id")}
def urljoin(a:str, b:str) -> str:
""" Joins to URL parts a and b while making sure this
exactly one slash inbetween. Empty strings are ignored.
"""
if a and a[-1] != '/':
a = a + '/'
if b and b[0] == '/':
b = b[1:]
return a + b
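# For example, both urljoin('https://example.org/catmaid', 'projects') and
# urljoin('https://example.org/catmaid/', '/projects') yield
# 'https://example.org/catmaid/projects' (example.org is a placeholder).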
def id_generator(size:int=6, chars:str=string.ascii_lowercase + string.digits) -> str:
""" Creates a random string of the specified length.
"""
return ''.join(random.choice(chars) for x in range(size))
class Echo:
"""An object that implements just the write method of the file-like
interface. From:
https://docs.djangoproject.com/en/1.11/howto/outputting-csv/
"""
def write(self, value):
"""Write the value by returning it, instead of storing in a buffer."""
return value
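# Typical streaming-CSV usage, sketched after the linked Django howto
# ("data" is a hypothetical iterable of rows; csv must be imported):
#
#   writer = csv.writer(Echo())
#   rows = (writer.writerow(row) for row in data)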
def is_reachable(url:str, auth=None) -> Tuple[bool, str]:
"""Test if an URL is reachable. Returns a tuple of a boolean and an explanation.
"""
try:
r = requests.head(url, auth=auth, timeout=1)
        if 200 <= r.status_code < 400:
return (True, 'URL accessible')
else:
return (False, r.reason)
except requests.ConnectionError:
return (False, 'No route to host')
def is_valid_host(host:str, auth=None) -> Tuple[bool, str]:
"""Test if the passed in string is a valid URI. Returns a tuple of a boolean and an explanation
"""
host = host.strip()
if 0 == len(host):
return (False, 'No URL provided')
if '://' not in host:
return (False, 'URL is missing protocol (http://...)')
reachable, reason = is_reachable(host, auth)
if not reachable:
return (False, f'URL not reachable: {reason}')
return (True, "Ok")
def batches(iterable, size):
"""Iterate in batches.
"""
source = iter(iterable)
while True:
chunk = [val for _, val in zip(range(size), source)]
if not chunk:
            return  # PEP 479: raising StopIteration inside a generator is a RuntimeError in Python 3.7+
yield chunk
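# With the PEP 479 compliant "return" above, exhausting the source ends the
# generator cleanly; e.g. list(batches(range(5), 2)) == [[0, 1], [2, 3], [4]].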
def get_last_concept_id():
cursor = connection.cursor()
cursor.execute("""
SELECT last_value FROM concept_id_seq;
""")
return cursor.fetchone()[0]
|
catmaid/CATMAID
|
django/applications/catmaid/control/common.py
|
Python
|
gpl-3.0
| 14,899
|
[
"NEURON"
] |
857a5cf115d7653ee0114e5f5fc3b42173ca40aabf5ea731947a42e40acc3cda
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/observables_correlators.py")
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
def test_fcs_acf(self):
fcs_acf_weights = np.copy(sample.fcs.get_params()['args'])
np.testing.assert_allclose(fcs_acf_weights, [100., 100., 100.])
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/scripts/samples/test_observables_correlators.py
|
Python
|
gpl-3.0
| 1,183
|
[
"ESPResSo"
] |
5b232392d333f5a517cc88a8c5db12aad659745eb2fe52e334d483c02c85509b
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtDesigner
from camelot.view.plugins import CamelotEditorPlugin
class OneToManyEditorPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin, CamelotEditorPlugin):
def __init__(self, parent = None):
QtDesigner.QPyDesignerCustomWidgetPlugin.__init__(self)
from camelot.view.controls.editors import One2ManyEditor
CamelotEditorPlugin.__init__(self)
self._widget = One2ManyEditor
|
kurtraschke/camelot
|
camelot/view/plugins/onetomanyeditorplugin.py
|
Python
|
gpl-2.0
| 1,494
|
[
"VisIt"
] |
249c8fc97f72086edde90d244f296da81643dc79c4475e80e872e1886a3c7359
|
import math
import numpy
import random
import operator
import types
import numpy as np
import h5py
import IPython as ipy
import os
import sys
def one_l_print(string, pad=20):
for _ in range(pad): string += ' '
string += '\r'
sys.stdout.write(string)
sys.stdout.flush()
# Define a context manager to suppress stdout
class suppress_stdout(object):
'''
A context manager for doing a "deep suppression" of stdout in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
'''
def __init__(self):
# Open a null file
self.null_fds = os.open(os.devnull,os.O_RDWR)
# Save the actual stdout file descriptor
self.save_fds = os.dup(1)
def __enter__(self):
# Assign the null pointers to stdout
os.dup2(self.null_fds,1)
os.close(self.null_fds)
def __exit__(self, *_):
# Re-assign the real stdout back
os.dup2(self.save_fds,1)
# Close the null file
os.close(self.save_fds)
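# Typical use (a sketch; noisy_library_call is a hypothetical function whose
# C-level prints would otherwise reach the terminal):
#
#   with suppress_stdout():
#       noisy_library_call()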
class Transform(object):
"""
Rotation and translation represented as 4 x 4 matrix
"""
def __init__(self, matrix):
self.matrix = numpy.array(matrix)
self.matrix_inv = None
self.zRot = False
def inverse(self):
"""
Returns transformation matrix that is the inverse of this one
"""
        if self.matrix_inv is None:
self.matrix_inv = numpy.linalg.inv(self.matrix)
return Transform(self.matrix_inv)
def __neg__(self):
return self.inverse()
def compose(self, trans):
"""
Returns composition of self and trans
"""
tr = Transform(numpy.dot(self.matrix, trans.matrix))
if self.zRot and trans.zRot:
return tr.pose()
else:
return tr
def __mul__(self, other):
return self.compose(other)
def pose(self, zthr = 0.01, fail = True):
"""
Convert to Pose
"""
if abs(1 - self.matrix[2][2]) < zthr:
theta = math.atan2(self.matrix[1][0], self.matrix[0][0])
return Pose(self.matrix[0][3], self.matrix[1][3], self.matrix[2][3], theta)
elif fail:
print self.matrix
raise Exception, "Not a valid 2.5D Pose"
else:
return None
def point(self):
return self.pose().point()
def applyToPoint(self, point):
"""
Transform a point into a new point.
"""
p = numpy.dot(self.matrix, point.matrix())
return Point(p[0], p[1], p[2], p[3])
def __call__(self, point):
return self.applyToPoint(point)
def __repr__(self):
return str(self.matrix)
def shortStr(self, trim = False):
return self.__repr__()
__str__ = __repr__
class Pose(Transform): # 2.5D transform
"""
Represent the x, y, z, theta pose of an object in 2.5D space
"""
def __init__(self, x, y, z, theta):
self.x = x
"""x coordinate"""
self.y = y
"""y coordinate"""
self.z = z
"""z coordinate"""
self.theta = fixAngle02Pi(theta)
"""rotation in radians"""
self.initTrans()
self.zRot = True
def initTrans(self):
cosTh = math.cos(self.theta)
sinTh = math.sin(self.theta)
self.reprString = None
Transform.__init__(self, [[cosTh, -sinTh, 0.0, self.x],
[sinTh, cosTh, 0.0, self.y],
[0.0, 0.0, 1.0, self.z],
[0, 0, 0, 1]])
def setX(self, x):
self.x = x
self.initTrans()
def setY(self, y):
self.y = y
self.initTrans()
def setZ(self, z):
self.z = z
self.initTrans()
def setTheta(self, theta):
self.theta = theta
self.initTrans()
def average(self, other, alpha):
"""
Weighted average of this pose and other
"""
return Pose(alpha * self.x + (1 - alpha) * other.x,
alpha * self.y + (1 - alpha) * other.y,
alpha * self.z + (1 - alpha) * other.z,
angleAverage(self.theta, other.theta, alpha))
def point(self):
"""
Return just the x, y, z parts represented as a C{Point}
"""
return Point(self.x, self.y, self.z)
def pose(self, fail = False):
return self
def near(self, pose, distEps, angleEps):
"""
Return True if pose is within distEps and angleEps of self
"""
return self.point().isNear(pose.point(), distEps) and \
nearAngle(self.theta, pose.pose().theta, angleEps)
def diff(self, pose):
"""
Return a pose that is the difference between self and pose (in
x, y, z, and theta)
"""
return Pose(self.x-pose.x,
self.y-pose.y,
self.z-pose.z,
fixAnglePlusMinusPi(self.theta-pose.theta))
def distance(self, pose):
"""
Return the distance between the x,y,z part of self and the x,y,z
part of pose.
"""
return self.point().distance(pose.point())
def totalDist(self, pose, angleScale = 1):
return self.distance(pose) + \
abs(fixAnglePlusMinusPi(self.theta-pose.theta)) * angleScale
def inverse(self):
"""
Return a transformation matrix that is the inverse of the
transform associated with this pose.
"""
return super(Pose, self).inverse().pose()
def xyztTuple(self):
"""
Representation of pose as a tuple of values
"""
return (self.x, self.y, self.z, self.theta)
def corrupt(self, e, eAng = None):
def corrupt(x, e):
return x + random.uniform(-e, e)
eAng = eAng or e
return Pose(corrupt(self.x, e), corrupt(self.y, e), corrupt(self.z, e),
fixAnglePlusMinusPi(corrupt(self.theta, eAng)))
def corruptGauss(self, mu, sigma, noZ = False):
def corrupt(x):
return x + random.gauss(mu, sigma)
return Pose(corrupt(self.x), corrupt(self.y),
self.z if noZ else corrupt(self.z),
fixAnglePlusMinusPi(corrupt(self.theta)))
def __repr__(self):
if not self.reprString:
# An attempt to make string equality useful
self.reprString = 'Pose[' + prettyString(self.x) + ', ' +\
prettyString(self.y) + ', ' +\
prettyString(self.z) + ', ' +\
(prettyString(self.theta) \
if self.theta <= 6.283 else prettyString(0.0))\
+ ']'
#self.reprString = 'Pose'+ prettyString(self.xyztTuple())
return self.reprString
def shortStr(self, trim = False):
return self.__repr__()
def __eq__(self, other):
return str(self) == str(other)
def __hash__(self):
return str(self).__hash__()
__str__ = __repr__
class Point:
"""
Represent a point with its x, y, z values
"""
def __init__(self, x, y, z, w=1.0):
self.x = x
"""x coordinate"""
self.y = y
"""y coordinate"""
self.z = z
"""z coordinate"""
self.w = w
"""w coordinate"""
def matrix(self):
# recompute each time to allow changing coords... reconsider this later
return numpy.array([self.x, self.y, self.z, self.w])
def isNear(self, point, distEps):
"""
Return true if the distance between self and point is less
than distEps
"""
return self.distance(point) < distEps
def distance(self, point):
"""
Euclidean distance between two points
"""
dx = self.x - point.x
dy = self.y - point.y
dz = self.z - point.z
return math.sqrt(dx*dx + dy*dy + dz*dz)
def distanceXY(self, point):
"""
        Euclidean distance between two points, in the XY plane only
"""
return math.sqrt((self.x - point.x)**2 + (self.y - point.y)**2)
def distanceSq(self, point):
"""
Euclidean distance (squared) between two points
"""
dx = self.x - point.x
dy = self.y - point.y
dz = self.z - point.z
return dx*dx + dy*dy + dz*dz
def distanceSqXY(self, point):
"""
        Euclidean distance (squared) between two points, in the XY plane only
"""
dx = self.x - point.x
dy = self.y - point.y
return dx*dx + dy*dy
def magnitude(self):
"""
Magnitude of this point, interpreted as a vector in 3-space
"""
return math.sqrt(self.x**2 + self.y**2 + self.z**2)
def xyzTuple(self):
"""
Return tuple of x, y, z values
"""
return (self.x, self.y, self.z)
def pose(self, angle = 0.0):
"""
Return a pose with the position of the point.
"""
return Pose(self.x, self.y, self.z, angle)
def point(self):
"""
Return a point, that is, self.
"""
return self
def __repr__(self):
if self.w == 1:
return 'Point'+ prettyString(self.xyzTuple())
if self.w == 0:
return 'Delta'+ prettyString(self.xyzTuple())
else:
return 'PointW'+ prettyString(self.xyzTuple()+(self.w,))
def shortStr(self, trim = False):
return self.__repr__()
def angleToXY(self, p):
"""
Return angle in radians of vector from self to p (in the xy projection)
"""
dx = p.x - self.x
dy = p.y - self.y
return math.atan2(dy, dx)
def add(self, point):
"""
Vector addition
"""
return Point(self.x + point.x, self.y + point.y, self.z + point.z)
def __add__(self, point):
return self.add(point)
def sub(self, point):
"""
Vector subtraction
"""
return Point(self.x - point.x, self.y - point.y, self.z - point.z)
def __sub__(self, point):
return self.sub(point)
def scale(self, s):
"""
Vector scaling
"""
return Point(self.x*s, self.y*s, self.z*s)
def __rmul__(self, s):
return self.scale(s)
def dot(self, p):
"""
Dot product
"""
return self.x*p.x + self.y*p.y + self.z*p.z
class LineXY:
"""
Line in 2D space
"""
def __init__(self, p1, p2):
"""
Initialize with two points that are on the line. Actually
store a normal and an offset from the origin
"""
self.theta = p1.angleToXY(p2)
"""normal angle"""
self.nx = -math.sin(self.theta)
"""x component of normal vector"""
self.ny = math.cos(self.theta)
"""y component of normal vector"""
self.off = p1.x * self.nx + p1.y * self.ny
"""offset along normal"""
def pointOnLine(self, p, eps):
"""
Return true if p is within eps of the line
"""
dist = abs(p.x*self.nx + p.y*self.ny - self.off)
return dist < eps
def __repr__(self):
return 'LineXY'+ prettyString((self.nx, self.ny, self.off))
def shortStr(self, trim = False):
return self.__repr__()
class LineSeg(LineXY):
"""
Line segment in 2D space
"""
def __init__(self, p1, p2):
"""
Initialize with two points that are on the line. Store one of
the points and the vector between them.
"""
self.B = p1
"""One point"""
self.C = p2
"""Other point"""
self.M = p2 - p1
"""Vector from the stored point to the other point"""
LineXY.__init__(self, p1, p2)
"""Initialize line attributes"""
def closestPoint(self, p):
"""
Return the point on the line that is closest to point p
"""
t0 = self.M.dot(p - self.B) / self.M.dot(self.M)
if t0 <= 0:
return self.B
elif t0 >= 1:
return self.B + self.M
else:
return self.B + t0 * self.M
def distToPoint(self, p):
"""
Shortest distance between point p and this line
"""
return p.distance(self.closestPoint(p))
def __repr__(self):
return 'LineSeg'+ prettyString((self.B, self.M))
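# A small illustration (values are hypothetical): the closest point on a
# segment is clamped to its endpoints.
#
#   seg = LineSeg(Point(0., 0., 0.), Point(1., 0., 0.))
#   seg.closestPoint(Point(0.5, 1., 0.))   # interior: Point(0.5, 0., 0.)
#   seg.closestPoint(Point(2., 0., 0.))    # clamped to endpoint: Point(1., 0., 0.)
#   seg.distToPoint(Point(0.5, 1., 0.))    # 1.0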
#####################
def localToGlobal(pose, point):
return pose.transformPoint(point)
def localPoseToGlobalPose(pose1, pose2):
return pose1.compose(pose2)
# Given robot's pose in a global frame and a point in the global frame
# return coordinates of point in local frame
def globalToLocal(pose, point):
return pose.inverse().transformPoint(point)
def globalPoseToLocalPose(pose1, pose2):
return pose1.inverse().compose(pose2)
def sum(items):
"""
Defined to work on items other than numbers, which is not true for
the built-in sum.
"""
if len(items) == 0:
return 0
else:
result = items[0]
for item in items[1:]:
result += item
return result
def smash(lists):
return [item for sublist in lists for item in sublist]
def within(v1, v2, eps):
"""
    Return True if v1 is within eps of v2. All params are numbers
"""
return abs(v1 - v2) < eps
def nearAngle(a1,a2,eps):
"""
    Return True if angle a1 is within epsilon of angle a2. Don't use
within for this, because angles wrap around!
"""
return abs(fixAnglePlusMinusPi(a1-a2)) < eps
def nearlyEqual(x,y):
"""
Like within, but with the tolerance built in
"""
return abs(x-y)<.0001
def fixAnglePlusMinusPi(a):
"""
A is an angle in radians; return an equivalent angle between plus
and minus pi
"""
pi2 = 2.0* math.pi
while abs(a) > math.pi:
if a > math.pi:
a = a - pi2
elif a < -math.pi:
a = a + pi2
return a
def fixAngle02Pi(a):
"""
A is an angle in radians; return an equivalent angle between 0
and 2 pi
"""
pi2 = 2.0* math.pi
while a < 0 or a > pi2:
if a < 0:
a = a + pi2
elif a > pi2:
a = a - pi2
return a
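# For example, fixAnglePlusMinusPi(3 * math.pi) yields math.pi, and
# fixAngle02Pi(-math.pi / 2) yields 3 * math.pi / 2; both loops terminate
# because each iteration moves the angle by 2*pi toward the target range.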
def reverseCopy(items):
"""
Return a list that is a reversed copy of items
"""
itemCopy = items[:]
itemCopy.reverse()
return itemCopy
def dotProd(a, b):
"""
Return the dot product of two lists of numbers
"""
return sum([ai*bi for (ai,bi) in zip(a,b)])
def argmax(l, f):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
@returns: the element of C{l} that has the highest score
"""
vals = [f(x) for x in l]
return l[vals.index(max(vals))]
def argmaxWithVal(l, f):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
@returns: the element of C{l} that has the highest score and the score
"""
best = l[0]; bestScore = f(best)
for x in l:
xScore = f(x)
if xScore > bestScore:
best, bestScore = x, xScore
return (best, bestScore)
def argmaxIndex(l, f = lambda x: x):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
    @returns: the index of C{l} that has the highest score, and the score
"""
best = 0; bestScore = f(l[best])
for i in range(len(l)):
xScore = f(l[i])
if xScore > bestScore:
best, bestScore = i, xScore
return (best, bestScore)
def argmaxIndexWithTies(l, f = lambda x: x):
"""
@param l: C{List} of items
@param f: C{Procedure} that maps an item into a numeric score
    @returns: the indices of C{l} that tie for the highest score, and the score
"""
best = []; bestScore = f(l[0])
for i in range(len(l)):
xScore = f(l[i])
if xScore > bestScore:
best, bestScore = [i], xScore
elif xScore == bestScore:
best, bestScore = best + [i], xScore
return (best, bestScore)
def randomMultinomial(dist):
"""
@param dist: List of positive numbers summing to 1 representing a
multinomial distribution over integers from 0 to C{len(dist)-1}.
@returns: random draw from that distribution
"""
r = random.random()
for i in range(len(dist)):
r = r - dist[i]
if r < 0.0:
return i
return "weird"
def clip(v, vMin, vMax):
"""
@param v: number
@param vMin: number (may be None, if no limit)
@param vMax: number greater than C{vMin} (may be None, if no limit)
@returns: If C{vMin <= v <= vMax}, then return C{v}; if C{v <
vMin} return C{vMin}; else return C{vMax}
"""
try:
return [clip(x, vMin, vMax) for x in v]
except TypeError:
        if vMin is None:
            if vMax is None:
                return v
            else:
                return min(v, vMax)
        else:
            if vMax is None:
                return max(v, vMin)
            else:
                return max(min(v, vMax), vMin)
def flatten(M):
"""
basically a nice wrapper around reshape
@param M: matrix
@returns v: flattened matrix into a vector
"""
return np.reshape(M, (M.shape[0]*M.shape[1]))
def sign(x):
"""
Return 1, 0, or -1 depending on the sign of x
"""
if x > 0.0:
return 1
elif x == 0.0:
return 0
else:
return -1
def make2DArray(dim1, dim2, initValue):
"""
Return a list of lists representing a 2D array with dimensions
dim1 and dim2, filled with initialValue
"""
result = []
for i in range(dim1):
result = result + [makeVector(dim2, initValue)]
return result
def make2DArrayFill(dim1, dim2, initFun):
"""
Return a list of lists representing a 2D array with dimensions
dim1 and dim2, filled by calling initFun with every pair of
indices
"""
result = []
for i in range(dim1):
result = result + [makeVectorFill(dim2, lambda j: initFun(i, j))]
return result
def make3DArray(dim1, dim2, dim3, initValue):
"""
Return a list of lists of lists representing a 3D array with dimensions
dim1, dim2, and dim3 filled with initialValue
"""
result = []
for i in range(dim1):
result = result + [make2DArray(dim2, dim3, initValue)]
return result
def mapArray3D(array, f):
"""
Map a function over the whole array. Side effects the array. No
return value.
"""
for i in range(len(array)):
for j in range(len(array[0])):
for k in range(len(array[0][0])):
array[i][j][k] = f(array[i][j][k])
def makeVector(dim, initValue):
"""
Return a list of dim copies of initValue
"""
return [initValue]*dim
def makeVectorFill(dim, initFun):
"""
Return a list resulting from applying initFun to values from 0 to
dim-1
"""
return [initFun(i) for i in range(dim)]
def prettyString(struct):
"""
Make nicer looking strings for printing, mostly by truncating
floats
"""
if type(struct) == list:
return '[' + ', '.join([prettyString(item) for item in struct]) + ']'
elif type(struct) == tuple:
return '(' + ', '.join([prettyString(item) for item in struct]) + ')'
elif type(struct) == dict:
return '{' + ', '.join([str(item) + ':' + prettyString(struct[item]) \
for item in struct]) + '}'
elif type(struct) == float or type(struct) == numpy.float64:
struct = round(struct, 3)
if struct == 0: struct = 0 # catch stupid -0.0
return "%5.3f" % struct
else:
return str(struct)
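# For example, prettyString([1.23456, (0.5, 'a')]) returns
# '[1.235, (0.500, a)]': floats are rounded to three decimals, containers
# are rendered recursively, and everything else falls back to str().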
def swapRange(x, y):
if x < y:
return range(x, y)
if x > y:
r = range(y, x)
r.reverse()
return r
return [x]
def avg(a, b):
if type(a) in (types.TupleType, types.ListType) and \
type(b) in (types.TupleType, types.ListType) and \
len(a) == len(b):
return tuple([avg(a[i], b[i]) for i in range(len(a))])
else:
return (a + b)/2.0
def recoverPath(volume, start, end):
if not volume:
return None
p = []
current = start
while current != end:
p.append(current)
successors = [[current[0] + i, current[1] + j] for (i,j) in [(1,0), (0, 1), (-1, 0), (0, -1)]]
        for v in volume:
            if list(v) in successors and not list(v) in p:
                current = list(v)
                break  # stop at the first unvisited neighbor; 'continue' only resumed the scan
p.append(end)
return p
class SymbolGenerator:
"""
Generate new symbols guaranteed to be different from one another
Optionally, supply a prefix for mnemonic purposes
Call gensym("foo") to get a symbol like 'foo37'
"""
def __init__(self):
self.count = 0
def gensym(self, prefix = 'i'):
self.count += 1
return prefix + '_' + str(self.count)
gensym = SymbolGenerator().gensym
"""Call this function to get a new symbol"""
def logGaussian(x, mu, sigma):
"""
Log of the value of the gaussian distribution with mean mu and
stdev sigma at value x
"""
return -((x-mu)**2 / (2*sigma**2)) - math.log(sigma*math.sqrt(2*math.pi))
def gaussian(x, mu, sigma):
"""
Value of the gaussian distribution with mean mu and
stdev sigma at value x
"""
return math.exp(-((x-mu)**2 / (2*sigma**2))) /(sigma*math.sqrt(2*math.pi))
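# Sanity check: gaussian(0.0, 0.0, 1.0) equals 1/sqrt(2*pi) ~= 0.3989, and
# logGaussian agrees with math.log(gaussian(x, mu, sigma)) up to floating
# point error while staying finite far out in the tails.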
def lineIndices((i0, j0), (i1, j1)):
"""
Takes two cells in the grid (each described by a pair of integer
indices), and returns a list of the cells in the grid that are on the
line segment between the cells.
"""
ans = [(i0,j0)]
di = i1 - i0
dj = j1 - j0
t = 0.5
if abs(di) > abs(dj): # slope < 1
m = float(dj) / float(di) # compute slope
t += j0
if di < 0: di = -1
else: di = 1
m *= di
while (i0 != i1):
i0 += di
t += m
ans.append((i0, int(t)))
else:
if dj != 0: # slope >= 1
m = float(di) / float(dj) # compute slope
t += i0
if dj < 0: dj = -1
else: dj = 1
m *= dj
while j0 != j1:
j0 += dj
t += m
ans.append((int(t), j0))
return ans
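# For example, lineIndices((0, 0), (3, 1)) takes the shallow-slope branch
# (abs(di) > abs(dj)) and returns [(0, 0), (1, 0), (2, 1), (3, 1)].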
def angleDiff(x, y):
twoPi = 2*math.pi
z = (x - y)%twoPi
if z > math.pi:
return z - twoPi
else:
return z
def inRange(v, r):
return r[0] <= v <= r[1]
def rangeOverlap(r1, r2):
return r2[0] <= r1[1] and r1[0] <= r2[1]
def rangeIntersect(r1, r2):
return (max(r1[0], r2[0]), min(r1[1], r2[1]))
def average(stuff):
return (1./float(len(stuff)))*sum(stuff)
def tuplify(x):
if isIterable(x):
return tuple([tuplify(y) for y in x])
else:
return x
def squash(listOfLists):
return reduce(operator.add, listOfLists)
# Average two angles
def angleAverage(th1, th2, alpha):
return math.atan2(alpha * math.sin(th1) + (1 - alpha) * math.sin(th2),
alpha * math.cos(th1) + (1 - alpha) * math.cos(th2))
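# Averaging on the unit circle avoids wraparound artifacts: for angles just
# above 0 and just below 2*pi, angleAverage(0.1, 2 * math.pi - 0.1, 0.5)
# is 0.0, whereas the naive arithmetic mean would land near pi.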
def floatRange(lo, hi, stepsize):
"""
@returns: a list of numbers, starting with C{lo}, and increasing
by C{stepsize} each time, until C{hi} is equaled or exceeded.
C{lo} must be less than C{hi}; C{stepsize} must be greater than 0.
"""
if stepsize == 0:
print 'Stepsize is 0 in floatRange'
result = []
v = lo
while v <= hi:
result.append(v)
v += stepsize
return result
def euclideanDistance(x, y):
return math.sqrt(sum([(xi - yi)**2 for (xi, yi) in zip(x, y)]))
def pop(x):
if isinstance(x, list):
if len(x) > 0:
return x.pop(0)
else:
return None
else:
try:
return x.next()
except StopIteration:
return None
def isIterable(x):
if type(x) in (str, unicode):
return False
try:
x_iter = iter(x)
return True
except:
return False
def tangentSpaceAdd(a, b):
res = a + b
for i in range(3, len(res), 4):
res[i, 0] = fixAnglePlusMinusPi(res[i, 0])
return res
def scalarMult(l, c):
return type(l)([i*c for i in l])
def componentAdd(a, b):
return type(a)([i + j for (i, j) in zip(a, b)])
def componentSubtract(a, b):
return componentAdd(a, [-1*i for i in b])
|
dhadfieldmenell/bootstrapping-lfd
|
scripts/dhm_utils.py
|
Python
|
bsd-2-clause
| 24,683
|
[
"Gaussian"
] |
a748241fbaf1d312b2e4572f475c8129252a6de44914566683fcf6cc2b793c43
|
"""
This module implements all the functions to communicate with other Python
modules (PIL, matplotlib, mayavi, etc.)
"""
import numpy as np
def PIL_to_npimage(im):
""" Transforms a PIL/Pillow image into a numpy RGB(A) image.
    Actually, all this does is return numpy.array(im)."""
return np.array(im)
#w,h = im.size
#d = (4 if im.mode=="RGBA" else 3)
#return +np.frombuffer(im.tobytes(), dtype='uint8').reshape((h,w,d))
def mplfig_to_npimage(fig):
""" Converts a matplotlib figure to a RGB frame after updating the canvas"""
# only the Agg backend now supports the tostring_rgb function
from matplotlib.backends.backend_agg import FigureCanvasAgg
canvas = FigureCanvasAgg(fig)
canvas.draw() # update/draw the elements
# get the width and the height to resize the matrix
l,b,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
# exports the canvas to a string buffer and then to a numpy nd.array
buf = canvas.tostring_rgb()
    image = np.fromstring(buf, dtype=np.uint8)
    return image.reshape(h, w, 3)
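# A minimal usage sketch (assuming matplotlib with the Agg backend installed):
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([0, 1], [0, 1])
#   frame = mplfig_to_npimage(fig)   # -> uint8 array of shape (h, w, 3)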
|
kerimlcr/ab2017-dpyo
|
ornek/moviepy/moviepy-0.2.2.12/moviepy/video/io/bindings.py
|
Python
|
gpl-3.0
| 1,079
|
[
"Mayavi"
] |
e2048b8de383a15b3e2817360b6b92f9c18d5019e3f6fb7fc79054c02ba1259c
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .quantity import *
# Constants
unity = ureg.angstrom / ureg.angstrom
imi = 1.0j
pi = np.pi
sqrtpi = np.sqrt(np.pi)
sqrt2 = np.sqrt(2.0)
epsilon_0 = ureg.epsilon_0
c = ureg.speed_of_light
alpha = ureg.fine_structure_constant
hbar = ureg.hbar
boltz = boltzmann_constant = k_b = ureg.boltzmann_constant
avogadro = (1.0 * ureg.mole * ureg.avogadro_number).to(unity).magnitude
# atomic units
hartree = ureg.hartree
a0 = bohr = ureg.bohr
atomic_time = t0 = ureg.t0
electron_mass = m_e = ureg.electron_mass
electron_charge = q_e = ureg.elementary_charge
# useful units
fs = femtoseconds = ureg.fs
ps = picoseconds = ureg.ps
eV = electronvolts = ureg.eV
kcalpermol = ureg.kcalpermol
gpermol = ureg.gpermol
kjpermol = ureg.kjpermol
radians = radian = rad = ureg.rad
degrees = degree = deg = ureg.degrees
amu = da = dalton = ureg.amu
kelvin = ureg.kelvin
nm = ureg.nanometers
ang = angstrom = ureg.ang
molar = ureg.mole / ureg.liter
debye = ureg.debye
# sets default unit systems
def_length = angstrom
def_time = fs
def_vel = angstrom / fs
def_mass = amu
def_momentum = def_mass * def_vel
def_force = def_momentum / def_time
def_energy = eV
|
tkzeng/molecular-design-toolkit
|
moldesign/units/constants.py
|
Python
|
apache-2.0
| 1,717
|
[
"Avogadro",
"Dalton"
] |
c1357878cbc9d2b3f7f55c1cd9bfc645139857d7e178c8ed7546b3119cbcb26a
|
# ______________________________________________________________________
'''Defines a bytecode based LLVM translator for llpython code.
'''
# ______________________________________________________________________
# Module imports
from __future__ import absolute_import
import opcode
import types
import logging
import llvm.core as lc
from . import opcode_util
from . import bytetype
from .bytecode_visitor import BytecodeFlowVisitor
from .byte_flow import BytecodeFlowBuilder
from .byte_control import ControlFlowBuilder
from .phi_injector import PhiInjector, synthetic_opname
# ______________________________________________________________________
# Module data
logger = logging.getLogger(__name__)
# XXX Stolen from numba.translate:
_compare_mapping_float = {'>':lc.FCMP_OGT,
'<':lc.FCMP_OLT,
'==':lc.FCMP_OEQ,
'>=':lc.FCMP_OGE,
'<=':lc.FCMP_OLE,
'!=':lc.FCMP_ONE}
_compare_mapping_sint = {'>':lc.ICMP_SGT,
'<':lc.ICMP_SLT,
'==':lc.ICMP_EQ,
'>=':lc.ICMP_SGE,
'<=':lc.ICMP_SLE,
'!=':lc.ICMP_NE}
# XXX Stolen from numba.llvm_types:
class LLVMCaster (object):
def build_pointer_cast(_, builder, lval1, lty2):
return builder.bitcast(lval1, lty2)
def build_int_cast(_, builder, lval1, lty2, unsigned = False):
width1 = lval1.type.width
width2 = lty2.width
ret_val = lval1
if width2 > width1:
if unsigned:
ret_val = builder.zext(lval1, lty2)
else:
ret_val = builder.sext(lval1, lty2)
elif width2 < width1:
ret_val = builder.trunc(lval1, lty2)
return ret_val
def build_float_ext(_, builder, lval1, lty2):
return builder.fpext(lval1, lty2)
def build_float_trunc(_, builder, lval1, lty2):
return builder.fptrunc(lval1, lty2)
def build_int_to_float_cast(_, builder, lval1, lty2, unsigned = False):
ret_val = None
if unsigned:
ret_val = builder.uitofp(lval1, lty2)
else:
ret_val = builder.sitofp(lval1, lty2)
return ret_val
def build_int_to_ptr_cast(_, builder, lval1, lty2):
return builder.inttoptr(lval1, lty2)
def build_float_to_int_cast(_, builder, lval1, lty2, unsigned = False):
ret_val = None
if unsigned:
ret_val = builder.fptoui(lval1, lty2)
else:
ret_val = builder.fptosi(lval1, lty2)
return ret_val
CAST_MAP = {
lc.TYPE_POINTER : build_pointer_cast,
lc.TYPE_INTEGER: build_int_cast,
(lc.TYPE_FLOAT, lc.TYPE_DOUBLE) : build_float_ext,
(lc.TYPE_DOUBLE, lc.TYPE_FLOAT) : build_float_trunc,
(lc.TYPE_INTEGER, lc.TYPE_FLOAT) : build_int_to_float_cast,
(lc.TYPE_INTEGER, lc.TYPE_DOUBLE) : build_int_to_float_cast,
(lc.TYPE_INTEGER, lc.TYPE_POINTER) : build_int_to_ptr_cast,
(lc.TYPE_FLOAT, lc.TYPE_INTEGER) : build_float_to_int_cast,
(lc.TYPE_DOUBLE, lc.TYPE_INTEGER) : build_float_to_int_cast,
}
@classmethod
def build_cast(cls, builder, lval1, lty2, *args, **kws):
ret_val = lval1
lty1 = lval1.type
lkind1 = lty1.kind
lkind2 = lty2.kind
if lkind1 == lkind2:
if lkind1 in cls.CAST_MAP:
ret_val = cls.CAST_MAP[lkind1](cls, builder, lval1, lty2,
*args, **kws)
else:
raise NotImplementedError(lkind1)
else:
map_index = (lkind1, lkind2)
if map_index in cls.CAST_MAP:
ret_val = cls.CAST_MAP[map_index](cls, builder, lval1, lty2,
*args, **kws)
else:
raise NotImplementedError(lkind1, lkind2)
return ret_val
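# A hedged usage sketch (the value name is illustrative, relying on llvmpy's
# llvm.core API): given a builder positioned in a basic block and an i32 SSA
# value "val", widening it to a double would be
#
#     LLVMCaster.build_cast(builder, val, lc.Type.double())
#
# which dispatches through CAST_MAP to build_int_to_float_cast.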
# ______________________________________________________________________
# Class definitions
class LLVMTranslator (BytecodeFlowVisitor):
'''Transformer responsible for visiting a set of bytecode flow
trees, emitting LLVM code.
Unlike other translators in :py:mod:`llpython`, this
incorporates the full transformation chain, starting with
:py:class:`llpython.byte_flow.BytecodeFlowBuilder`, then
:py:class:`llpython.byte_control.ControlFlowBuilder`, and
then :py:class:`llpython.phi_injector.PhiInjector`.'''
def __init__ (self, llvm_module = None, *args, **kws):
'''Constructor for LLVMTranslator.'''
super(LLVMTranslator, self).__init__(*args, **kws)
if llvm_module is None:
llvm_module = lc.Module.new('Translated_Module_%d' % (id(self),))
self.llvm_module = llvm_module
self.bytecode_flow_builder = BytecodeFlowBuilder()
self.control_flow_builder = ControlFlowBuilder()
self.phi_injector = PhiInjector()
def translate (self, function, llvm_type = None, llvm_function = None,
env = None):
'''Translate a function to the given LLVM function type.
If no type is given, then assume the function is of LLVM type
"void ()".
The optional env parameter allows extension of the global
environment.'''
if llvm_type is None:
if llvm_function is None:
llvm_type = lc.Type.function(bytetype.lvoid, ())
else:
llvm_type = llvm_function.type.pointee
if env is None:
env = {}
else:
env = env.copy()
env.update((name, method)
for name, method in lc.Builder.__dict__.items()
if not name.startswith('_'))
env.update((name, value)
for name, value in bytetype.__dict__.items()
if not name.startswith('_'))
self.loop_stack = []
self.llvm_type = llvm_type
self.target_function_name = env.get('target_function_name',
function.__name__)
self.function = function
self.code_obj = opcode_util.get_code_object(function)
func_globals = getattr(function, 'func_globals',
getattr(function, '__globals__', {})).copy()
func_globals.update(env)
self.globals = func_globals
nargs = self.code_obj.co_argcount
self.cfg = self.control_flow_builder.visit(
opcode_util.build_basic_blocks(self.code_obj), nargs)
self.cfg.blocks = self.bytecode_flow_builder.visit_cfg(self.cfg)
self.llvm_function = llvm_function
flow = self.phi_injector.visit_cfg(self.cfg, nargs)
ret_val = self.visit(flow)
del self.cfg
del self.globals
del self.code_obj
del self.target_function_name
del self.function
del self.llvm_type
del self.loop_stack
return ret_val
def enter_flow_object (self, flow):
super(LLVMTranslator, self).enter_flow_object(flow)
if self.llvm_function is None:
self.llvm_function = self.llvm_module.add_function(
self.llvm_type, self.target_function_name)
self.llvm_blocks = {}
self.llvm_definitions = {}
self.pending_phis = {}
for block in self.block_list:
if 0 in self.cfg.blocks_reaching[block]:
bb = self.llvm_function.append_basic_block(
'BLOCK_%d' % (block,))
self.llvm_blocks[block] = bb
def exit_flow_object (self, flow):
super(LLVMTranslator, self).exit_flow_object(flow)
ret_val = self.llvm_function
del self.pending_phis
del self.llvm_definitions
del self.llvm_blocks
if __debug__ and logger.getEffectiveLevel() < logging.DEBUG:
logger.debug(str(ret_val))
return ret_val
def enter_block (self, block):
ret_val = False
if block in self.llvm_blocks:
self.llvm_block = self.llvm_blocks[block]
self.builder = lc.Builder.new(self.llvm_block)
ret_val = True
return ret_val
def exit_block (self, block):
bb_instrs = self.llvm_block.instructions
if ((len(bb_instrs) == 0) or
(not bb_instrs[-1].is_terminator)):
out_blocks = list(self.cfg.blocks_out[block])
assert len(out_blocks) == 1
self.builder.branch(self.llvm_blocks[out_blocks[0]])
del self.llvm_block
del self.builder
def visit_synthetic_op (self, i, op, arg, *args, **kws):
method = getattr(self, 'op_%s' % (synthetic_opname[op],))
return method(i, op, arg, *args, **kws)
def op_REF_ARG (self, i, op, arg, *args, **kws):
return [self.llvm_function.args[arg]]
def op_BUILD_PHI (self, i, op, arg, *args, **kws):
phi_type = None
incoming = []
pending = []
for child_arg in arg:
child_block, _, child_opname, child_arg, _ = child_arg
assert child_opname == 'REF_DEF'
if child_arg in self.llvm_definitions:
child_def = self.llvm_definitions[child_arg]
if phi_type is None:
phi_type = child_def.type
incoming.append((child_block, child_def))
else:
pending.append((child_arg, child_block))
phi = self.builder.phi(phi_type)
for block_index, defn in incoming:
phi.add_incoming(defn, self.llvm_blocks[block_index])
for defn_index, block_index in pending:
if defn_index not in self.pending_phis:
self.pending_phis[defn_index] = []
self.pending_phis[defn_index].append((phi, block_index))
return [phi]
def op_DEFINITION (self, i, op, def_index, *args, **kws):
assert len(args) == 1
arg = args[0]
if def_index in self.pending_phis:
for phi, block_index in self.pending_phis[def_index]:
phi.add_incoming(arg, self.llvm_blocks[block_index])
self.llvm_definitions[def_index] = arg
return args
def op_REF_DEF (self, i, op, arg, *args, **kws):
return [self.llvm_definitions[arg]]
def op_BINARY_ADD (self, i, op, arg, *args, **kws):
arg1, arg2 = args
if arg1.type.kind == lc.TYPE_INTEGER:
ret_val = [self.builder.add(arg1, arg2)]
elif arg1.type.kind in (lc.TYPE_FLOAT, lc.TYPE_DOUBLE):
ret_val = [self.builder.fadd(arg1, arg2)]
elif arg1.type.kind == lc.TYPE_POINTER:
ret_val = [self.builder.gep(arg1, [arg2])]
else:
raise NotImplementedError("LLVMTranslator.op_BINARY_ADD for %r" %
(args,))
return ret_val
def op_BINARY_AND (self, i, op, arg, *args, **kws):
return [self.builder.and_(args[0], args[1])]
def op_BINARY_DIVIDE (self, i, op, arg, *args, **kws):
arg1, arg2 = args
if arg1.type.kind == lc.TYPE_INTEGER:
ret_val = [self.builder.sdiv(arg1, arg2)]
elif arg1.type.kind in (lc.TYPE_FLOAT, lc.TYPE_DOUBLE):
ret_val = [self.builder.fdiv(arg1, arg2)]
else:
raise NotImplementedError("LLVMTranslator.op_BINARY_DIVIDE for %r"
% (args,))
return ret_val
def op_BINARY_FLOOR_DIVIDE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_BINARY_FLOOR_DIVIDE")
def op_BINARY_LSHIFT (self, i, op, arg, *args, **kws):
return [self.builder.shl(args[0], args[1])]
def op_BINARY_MODULO (self, i, op, arg, *args, **kws):
arg1, arg2 = args
if arg1.type.kind == lc.TYPE_INTEGER:
ret_val = [self.builder.srem(arg1, arg2)]
elif arg1.type.kind in (lc.TYPE_FLOAT, lc.TYPE_DOUBLE):
ret_val = [self.builder.frem(arg1, arg2)]
else:
raise NotImplementedError("LLVMTranslator.op_BINARY_MODULO for %r"
% (args,))
return ret_val
def op_BINARY_MULTIPLY (self, i, op, arg, *args, **kws):
arg1, arg2 = args
if arg1.type.kind == lc.TYPE_INTEGER:
ret_val = [self.builder.mul(arg1, arg2)]
elif arg1.type.kind in (lc.TYPE_FLOAT, lc.TYPE_DOUBLE):
ret_val = [self.builder.fmul(arg1, arg2)]
else:
raise NotImplementedError("LLVMTranslator.op_BINARY_MULTIPLY for "
"%r" % (args,))
return ret_val
def op_BINARY_OR (self, i, op, arg, *args, **kws):
return [self.builder.or_(args[0], args[1])]
def op_BINARY_POWER (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_BINARY_POWER")
def op_BINARY_RSHIFT (self, i, op, arg, *args, **kws):
return [self.builder.lshr(args[0], args[1])]
def op_BINARY_SUBSCR (self, i, op, arg, *args, **kws):
arr_val = args[0]
index_vals = args[1:]
ret_val = gep_result = self.builder.gep(arr_val, index_vals)
if (gep_result.type.kind == lc.TYPE_POINTER and
gep_result.type.pointee.kind != lc.TYPE_POINTER):
ret_val = self.builder.load(gep_result)
return [ret_val]
def op_BINARY_SUBTRACT (self, i, op, arg, *args, **kws):
arg1, arg2 = args
if arg1.type.kind == lc.TYPE_INTEGER:
ret_val = [self.builder.sub(arg1, arg2)]
elif arg1.type.kind in (lc.TYPE_FLOAT, lc.TYPE_DOUBLE):
ret_val = [self.builder.fsub(arg1, arg2)]
else:
raise NotImplementedError("LLVMTranslator.op_BINARY_SUBTRACT for "
"%r" % (args,))
return ret_val
op_BINARY_TRUE_DIVIDE = op_BINARY_DIVIDE
def op_BINARY_XOR (self, i, op, arg, *args, **kws):
return [self.builder.xor(args[0], args[1])]
def op_BREAK_LOOP (self, i, op, arg, *args, **kws):
return [self.builder.branch(self.llvm_blocks[arg])]
def op_BUILD_SLICE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_BUILD_SLICE")
def op_BUILD_TUPLE (self, i, op, arg, *args, **kws):
return args
def op_CALL_FUNCTION (self, i, op, arg, *args, **kws):
fn = args[0]
args = args[1:]
fn_name = getattr(fn, '__name__', None)
if isinstance(fn, (types.FunctionType, types.MethodType)):
ret_val = [fn(self.builder, *args)]
elif isinstance(fn, lc.Value):
ret_val = [self.builder.call(fn, args)]
elif isinstance(fn, lc.Type):
if isinstance(fn, lc.FunctionType):
ret_val = [self.builder.call(
self.llvm_module.get_or_insert_function(fn, fn_name),
args)]
else:
assert len(args) == 1
ret_val = [LLVMCaster.build_cast(self.builder, args[0], fn)]
else:
raise NotImplementedError("Don't know how to call %s() (%r @ %d)!"
% (fn_name, fn, i))
return ret_val
def op_CALL_FUNCTION_KW (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_CALL_FUNCTION_KW")
def op_CALL_FUNCTION_VAR (self, i, op, arg, *args, **kws):
args = list(args)
var_args = list(args.pop())
args.extend(var_args)
return self.op_CALL_FUNCTION(i, op, arg, *args, **kws)
def op_CALL_FUNCTION_VAR_KW (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_CALL_FUNCTION_VAR_KW")
def op_COMPARE_OP (self, i, op, arg, *args, **kws):
arg1, arg2 = args
cmp_kind = opcode.cmp_op[arg]
if isinstance(arg1.type, lc.IntegerType):
ret_val = [self.builder.icmp(_compare_mapping_sint[cmp_kind],
arg1, arg2)]
elif arg1.type.kind in (lc.TYPE_FLOAT, lc.TYPE_DOUBLE):
ret_val = [self.builder.fcmp(_compare_mapping_float[cmp_kind],
arg1, arg2)]
else:
raise NotImplementedError('Comparison of type %r' % (arg1.type,))
return ret_val
def op_CONTINUE_LOOP (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_CONTINUE_LOOP")
def op_DELETE_ATTR (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_DELETE_ATTR")
def op_DELETE_SLICE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_DELETE_SLICE")
def op_FOR_ITER (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_FOR_ITER")
def op_GET_ITER (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_GET_ITER")
op_INPLACE_ADD = op_BINARY_ADD
op_INPLACE_AND = op_BINARY_AND
op_INPLACE_DIVIDE = op_BINARY_DIVIDE
op_INPLACE_FLOOR_DIVIDE = op_BINARY_FLOOR_DIVIDE
op_INPLACE_LSHIFT = op_BINARY_LSHIFT
op_INPLACE_MODULO = op_BINARY_MODULO
op_INPLACE_MULTIPLY = op_BINARY_MULTIPLY
op_INPLACE_OR = op_BINARY_OR
op_INPLACE_POWER = op_BINARY_POWER
op_INPLACE_RSHIFT = op_BINARY_RSHIFT
op_INPLACE_SUBTRACT = op_BINARY_SUBTRACT
op_INPLACE_TRUE_DIVIDE = op_BINARY_TRUE_DIVIDE
op_INPLACE_XOR = op_BINARY_XOR
def op_JUMP_ABSOLUTE (self, i, op, arg, *args, **kws):
return [self.builder.branch(self.llvm_blocks[arg])]
def op_JUMP_FORWARD (self, i, op, arg, *args, **kws):
return [self.builder.branch(self.llvm_blocks[i + arg + 3])]
def op_JUMP_IF_FALSE (self, i, op, arg, *args, **kws):
cond = args[0]
block_false = self.llvm_blocks[i + 3 + arg]
block_true = self.llvm_blocks[i + 3]
return [self.builder.cbranch(cond, block_true, block_false)]
# raise NotImplementedError("LLVMTranslator.op_JUMP_IF_FALSE")
def op_JUMP_IF_FALSE_OR_POP (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_JUMP_IF_FALSE_OR_POP")
def op_JUMP_IF_TRUE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_JUMP_IF_TRUE")
def op_JUMP_IF_TRUE_OR_POP (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_JUMP_IF_TRUE_OR_POP")
def op_LOAD_ATTR (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_LOAD_ATTR")
def op_LOAD_CONST (self, i, op, arg, *args, **kws):
py_val = self.code_obj.co_consts[arg]
if isinstance(py_val, int):
ret_val = [lc.Constant.int(bytetype.lc_int, py_val)]
elif isinstance(py_val, float):
ret_val = [lc.Constant.double(py_val)]
        elif py_val is None:
            ret_val = [None]
        else:
            raise NotImplementedError('Constant conversion for %r' %
                                      (py_val,))
return ret_val
def op_LOAD_DEREF (self, i, op, arg, *args, **kws):
name = self.code_obj.co_freevars[arg]
ret_val = self.globals[name]
if isinstance(ret_val, lc.Type) and not hasattr(ret_val, '__name__'):
ret_val.__name__ = name
return [ret_val]
def op_LOAD_GLOBAL (self, i, op, arg, *args, **kws):
name = self.code_obj.co_names[arg]
ret_val = self.globals[name]
if isinstance(ret_val, lc.Type) and not hasattr(ret_val, '__name__'):
ret_val.__name__ = name
return [ret_val]
def op_POP_BLOCK (self, i, op, arg, *args, **kws):
self.loop_stack.pop()
return [self.builder.branch(self.llvm_blocks[i + 1])]
def op_POP_JUMP_IF_FALSE (self, i, op, arg, *args, **kws):
return [self.builder.cbranch(args[0], self.llvm_blocks[i + 3],
self.llvm_blocks[arg])]
def op_POP_JUMP_IF_TRUE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_POP_JUMP_IF_TRUE")
def op_POP_TOP (self, i, op, arg, *args, **kws):
return args
def op_RETURN_VALUE (self, i, op, arg, *args, **kws):
if args[0] is None:
ret_val = [self.builder.ret_void()]
else:
ret_val = [self.builder.ret(args[0])]
return ret_val
def op_SETUP_LOOP (self, i, op, arg, *args, **kws):
self.loop_stack.append((i, arg))
return [self.builder.branch(self.llvm_blocks[i + 3])]
def op_SLICE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_SLICE")
def op_STORE_ATTR (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_STORE_ATTR")
def op_STORE_SLICE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_STORE_SLICE")
def op_STORE_SUBSCR (self, i, op, arg, *args, **kws):
store_val, arr_val, index_val = args
dest_addr = self.builder.gep(arr_val, [index_val])
return [self.builder.store(store_val, dest_addr)]
def op_UNARY_CONVERT (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_UNARY_CONVERT")
def op_UNARY_INVERT (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_UNARY_INVERT")
def op_UNARY_NEGATIVE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_UNARY_NEGATIVE")
def op_UNARY_NOT (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_UNARY_NOT")
def op_UNARY_POSITIVE (self, i, op, arg, *args, **kws):
raise NotImplementedError("LLVMTranslator.op_UNARY_POSITIVE")
# ______________________________________________________________________
def translate_function (func, lltype, llvm_module = None, **kws):
'''Given a function and an LLVM function type, emit LLVM code for
that function using a new LLVMTranslator instance.'''
translator = LLVMTranslator(llvm_module)
ret_val = translator.translate(func, lltype, env = kws)
return ret_val
# ______________________________________________________________________
def translate_into_function (py_function, llvm_function, **kws):
translator = LLVMTranslator(llvm_function.module)
ret_val = translator.translate(py_function, llvm_function = llvm_function,
env = kws)
return ret_val
# ______________________________________________________________________
def llpython (lltype, llvm_module = None, **kws):
'''Decorator version of translate_function().'''
def _llpython (func):
return translate_function(func, lltype, llvm_module, **kws)
return _llpython
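# A hedged usage sketch ("add" is a hypothetical example function, and the
# types rely on llvmpy's lc.Type constructors):
#
#   int32 = lc.Type.int(32)
#
#   @llpython(lc.Type.function(int32, [int32, int32]))
#   def add(a, b):
#       return a + b
#
# After decoration, "add" is the emitted llvm.core function, not a Python one.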
# ______________________________________________________________________
def llpython_into (llvm_function, **kws):
def _llpython_into (func):
        return translate_into_function(func, llvm_function, **kws)
return _llpython_into
# ______________________________________________________________________
# Main (self-test) routine
def main (*args):
from tests import llfuncs, llfunctys
if not args:
args = ('doslice',)
elif 'all' in args:
args = [llfunc
for llfunc in dir(llfuncs) if not llfunc.startswith('_')]
llvm_module = lc.Module.new('test_module')
for arg in args:
translate_function(getattr(llfuncs, arg), getattr(llfunctys, arg),
llvm_module)
print(llvm_module)
# ______________________________________________________________________
if __name__ == '__main__':
import sys
main(*sys.argv[1:])
# ______________________________________________________________________
# End of byte_translator.py
|
llvmpy/llvmpy
|
llpython/byte_translator.py
|
Python
|
bsd-3-clause
| 23,939
|
[
"VisIt"
] |
ec7ebeffa289318289249117d7832dc20a214e77d94c3a0c3c3f2111d47042b0
|
#!/usr/bin/python
# Little script to convert output files from charmm/OPTIM to strict CRD format for use in VMD.
# Note that the .cor file extension is best for use with VMD, so that it doesn't get confused
# between charmm and amber. Open as "vmd newoutput.cor"
# Usage:
# <directory path>/standardize_crd.py <input filename>
import sys
from string import ljust
if len(sys.argv) < 2:
print 'Need to give the input filename as the argument'
sys.exit()
inp=open(sys.argv[1],'r')
out=open('newoutput.cor','w')
for each in inp:
els=each.split()
if each.startswith('*') :
pass
elif len(els) == 1 :
print>> out, "%5d"%(int(els[0]))
else:
print>> out, "%5d%5d%5s%5s%10.5f%10.5f%10.5f%5s%5s%10.5f"%(int(els[0]), int(els[1]), ljust(' '+els[2],5), ljust(' '+els[3],5), float(els[4]), float(els[5]), float(els[6]), ljust(' '+els[7],5), ljust(' '+els[1],5), 0.0)
# From the charmm documentation:
# ATOMNO RESNO RES TYPE X Y Z SEGID RESID Weighting
# I5 I5 1X A4 1X A4 F10.5 F10.5 F10.5 1X A4 1X A4 F10.5
inp.close()
out.close()
|
marktoakley/LamarckiAnt
|
SCRIPTS/CHARMM/standardize_crd.py
|
Python
|
gpl-3.0
| 1,098
|
[
"Amber",
"CHARMM",
"VMD"
] |
fdd32ca70a83ed258a57c0327c373e57628700e77a0ce005a50fe5c11b7af3c6
|
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state
from ..utils.graph import graph_laplacian
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
from .k_means_ import k_means
def _set_diag(laplacian, value):
from scipy import sparse
n_nodes = laplacian.shape[0]
# We need to put the diagonal at zero
if not sparse.isspmatrix(laplacian):
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
        # case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, mode=None,
random_state=None):
"""Project the sample on the first eigen vectors of the graph Laplacian
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigen vectors associated to the
smallest eigen values) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
so that the eigen vector decomposition works as expected.
Parameters
-----------
adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components: integer, optional
The dimension of the projection subspace.
mode: {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when mode == 'amg'. By default
arpack is used.
Returns
--------
embedding: array, shape: (n_samples, n_components)
The reduced samples
Notes
------
    The graph should contain only one connected component; otherwise the
    results make little sense.
"""
from scipy import sparse
from ..utils.arpack import eigsh
from scipy.sparse.linalg import lobpcg
from scipy.sparse.linalg.eigen.lobpcg.lobpcg import symeig
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if mode == "amg":
raise ValueError("The mode was set to 'amg', but pyamg is "
"not available.")
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric?
if mode is None:
mode = 'arpack'
    elif mode not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for mode: '%s'. "
                         "Should be 'arpack', 'lobpcg' or 'amg'" % mode)
laplacian, dd = graph_laplacian(adjacency,
normed=True, return_diag=True)
if (mode == 'arpack'
or not sparse.isspmatrix(laplacian)
or n_nodes < 5 * n_components):
        # lobpcg used with mode='amg' has bugs for a low number of nodes
laplacian = _set_diag(laplacian, 0)
# Here we'll use shift-invert mode for fast eigenvalues
# (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
sigma=1.0, which='LM')
embedding = diffusion_map.T[::-1] * dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fall back to lobpcg
mode = "lobpcg"
if mode == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
laplacian = laplacian.astype(np.float) # lobpcg needs native floats
ml = smoothed_aggregation_solver(laplacian.tocsr())
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components)
#X[:, 0] = 1. / dd.ravel()
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
if embedding.shape[0] == 1:
raise ValueError
elif mode == "lobpcg":
laplacian = laplacian.astype(np.float) # lobpcg needs native floats
if n_nodes < 5 * n_components + 1:
            # lobpcg will fall back to symeig, so we short-circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.todense()
lambdas, diffusion_map = symeig(laplacian)
embedding = diffusion_map.T[:n_components] * dd
else:
laplacian = laplacian.astype(np.float) # lobpcg needs native floats
laplacian = _set_diag(laplacian, 1)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components] * dd
if embedding.shape[0] == 1:
raise ValueError
return embedding
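# ----------------------------------------------------------------------
# A minimal usage sketch (not part of the original module; the helper
# name `_demo_spectral_embedding` and the toy affinity are ours). Two
# dense blocks with weak cross-links give two well-separated clusters
# in the embedding.
def _demo_spectral_embedding():
    rng = np.random.RandomState(0)
    affinity = np.kron(np.eye(2), np.ones((5, 5)))  # two 5-node cliques
    affinity += 0.01 * rng.rand(10, 10)             # weak coupling between them
    affinity = 0.5 * (affinity + affinity.T)        # keep it symmetric
    return spectral_embedding(affinity, n_components=2, random_state=0)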
def spectral_clustering(affinity, n_clusters=8, n_components=None, mode=None,
random_state=None, n_init=10, k=None):
"""Apply k-means to a projection to the normalized laplacian
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity: array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
        - symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters: integer, optional
Number of clusters to extract.
n_components: integer, optional, default is k
Number of eigen vectors to use for the spectral embedding
mode: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when mode == 'amg'
and by the K-Means initialization.
n_init: int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
Returns
-------
labels: array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
    if k is not None:
warnings.warn("'k' was renamed to n_clusters", DeprecationWarning)
n_clusters = k
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
mode=mode, random_state=random_state)
maps = maps[1:]
_, labels, _ = k_means(maps.T, n_clusters, random_state=random_state,
n_init=n_init)
return labels
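# A hedged sketch of the functional interface above (the helper name and
# toy data are ours, not from the original test suite): the same
# block-structured affinity recovers the two obvious clusters.
def _demo_spectral_clustering():
    affinity = np.kron(np.eye(2), np.ones((5, 5)))  # two 5-node cliques
    affinity[0, 5] = affinity[5, 0] = 0.1           # connect the two blocks
    return spectral_clustering(affinity, n_clusters=2, random_state=0)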
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply k-means to a projection to the normalized laplacian
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either the
    Gaussian (aka RBF) kernel of the euclidean distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity: string, 'nearest_neighbors', 'rbf' or 'precomputed'
gamma: float
Scaling factor of Gaussian (rbf) affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
n_neighbors: integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
mode: {None, 'arpack' or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when mode == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
Attributes
----------
`affinity_matrix_` : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
`labels_` :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
"""
def __init__(self, n_clusters=8, mode=None, random_state=None, n_init=10,
gamma=1., affinity='rbf', n_neighbors=10, k=None,
precomputed=False):
        if k is not None:
warnings.warn("'k' was renamed to n_clusters", DeprecationWarning)
n_clusters = k
self.n_clusters = n_clusters
self.mode = mode
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
def fit(self, X):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if ``affinity='precomputed'``, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use "
"a custom affinity matrix, set ``affinity=precomputed``.")
if self.affinity == 'rbf':
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma)
elif self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
raise ValueError("Invalid 'affinity'. Expected 'rbf', "
"'nearest_neighbors' or 'precomputed', got '%s'."
% self.affinity_matrix)
self.random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters, mode=self.mode,
random_state=self.random_state, n_init=self.n_init)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
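# A hedged end-to-end example of the estimator API above (the helper name
# and the synthetic data are ours, not from scikit-learn's docs).
def _demo_spectral_clustering_estimator():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 5.0])  # two blobs
    model = SpectralClustering(n_clusters=2, affinity='rbf', gamma=1.0,
                               random_state=0)
    return model.fit(X).labels_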
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sklearn/cluster/spectral.py
|
Python
|
agpl-3.0
| 15,966
|
[
"Gaussian"
] |
d4e8b96c2af391eeb31b44555f1fb76dc078640170593e9cff2854034e21633a
|
# sql/elements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
from .base import _generative, Generative
import re
import operator
def _clone(element, **kw):
return element._clone()
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
"""
expr = _literal_as_binds(expression)
return BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
def between(expr, lower_bound, upper_bound, symmetric=False):
"""Produce a ``BETWEEN`` predicate clause.
E.g.::
from sqlalchemy import between
stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
Would produce SQL resembling::
SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
The :func:`.between` function is a standalone version of the
:meth:`.ColumnElement.between` method available on all
SQL expressions, as in::
stmt = select([users_table]).where(users_table.c.id.between(5, 7))
All arguments passed to :func:`.between`, including the left side
    column expression, are coerced from Python scalar values if
the value is not a :class:`.ColumnElement` subclass. For example,
three fixed values can be compared as in::
print(between(5, 3, 7))
Which would produce::
:param_1 BETWEEN :param_2 AND :param_3
:param expr: a column expression, typically a :class:`.ColumnElement`
instance or alternatively a Python scalar expression to be coerced
into a column expression, serving as the left side of the ``BETWEEN``
expression.
:param lower_bound: a column or Python scalar expression serving as the
lower bound of the right side of the ``BETWEEN`` expression.
:param upper_bound: a column or Python scalar expression serving as the
upper bound of the right side of the ``BETWEEN`` expression.
:param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note
that not all databases support this syntax.
.. versionadded:: 0.9.5
.. seealso::
:meth:`.ColumnElement.between`
"""
expr = _literal_as_binds(expr)
return expr.between(lower_bound, upper_bound, symmetric=symmetric)
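# A small illustrative check (the helper name `_demo_between` is ours):
# with three plain Python values, each side is coerced to a bound
# parameter, as the docstring above describes.
def _demo_between():
    # compiles against the default dialect; expected form:
    # ":param_1 BETWEEN :param_2 AND :param_3"
    return str(between(5, 3, 7))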
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non-
:class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are
used in a comparison operation with a :class:`.ColumnElement` subclass,
such as a :class:`~sqlalchemy.schema.Column` object. Use this function
to force the generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return BindParameter(None, value, type_=type_, unique=True)
def type_coerce(expression, type_):
"""Associate a SQL expression with a particular type, without rendering
``CAST``.
E.g.::
from sqlalchemy import type_coerce
stmt = select([type_coerce(log_table.date_string, StringDateTime())])
The above construct will produce SQL that is usually otherwise unaffected
by the :func:`.type_coerce` call::
SELECT date_string FROM log
However, when result rows are fetched, the ``StringDateTime`` type
will be applied to result rows on behalf of the ``date_string`` column.
A type that features bound-value handling will also have that behavior
take effect when literal values or :func:`.bindparam` constructs are
passed to :func:`.type_coerce` as targets.
For example, if a type implements the :meth:`.TypeEngine.bind_expression`
method or :meth:`.TypeEngine.bind_processor` method or equivalent,
these functions will take effect at statement compilation/execution time
when a literal value is passed, as in::
# bound-value handling of MyStringType will be applied to the
# literal value "some string"
stmt = select([type_coerce("some string", MyStringType)])
:func:`.type_coerce` is similar to the :func:`.cast` function,
except that it does not render the ``CAST`` expression in the resulting
statement.
:param expression: A SQL expression, such as a :class:`.ColumnElement`
expression or a Python string which will be coerced into a bound literal
value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the expression is coerced.
.. seealso::
:func:`.cast`
"""
type_ = type_api.to_instance(type_)
if hasattr(expression, '__clause_element__'):
return type_coerce(expression.__clause_element__(), type_)
elif isinstance(expression, BindParameter):
bp = expression._clone()
bp.type = type_
return bp
elif not isinstance(expression, Visitable):
if expression is None:
return Null()
else:
return literal(expression, type_=type_)
else:
return Label(None, expression, type_=type_)
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return BindParameter(
key, None, type_=type_, unique=False, isoutparam=True)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`.ColumnElement` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
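# A hedged illustration (the helper name `_demo_not_` is ours). Note that
# SQLAlchemy negates by inverting the operator when it knows the inverse,
# so a literal NOT keyword may not appear in the output.
def _demo_not_():
    return str(not_(literal(5) > 3))   # typically ":param_1 <= :param_2"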
@inspection._self_inspects
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
description = None
_order_by_label_element = None
_is_from_container = False
def _clone(self):
"""Create a shallow copy of this ClauseElement.
        This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
ClauseElement._cloned_set._reset(c)
ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
        this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`.ClauseElement` with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_clauseelement(self, multiparams, params)
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should be normally created using the
:meth:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`.ClauseElement`'s bound engine,
if any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
:param compile_kwargs: optional dictionary of additional parameters
that will be passed through to the compiler within all "visit"
methods. This allows any custom flag to be passed through to
a custom compilation construct, for example. It is also used
for the case of passing the ``literal_binds`` flag through::
from sqlalchemy.sql import table, column, select
t = table('t', column('x'))
s = select([t]).where(t.c.x == 5)
print s.compile(compile_kwargs={"literal_binds": True})
.. versionadded:: 0.9.0
.. seealso::
:ref:`faq_sql_expression_string`
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
if util.py3k:
return str(self.compile())
else:
return unicode(self.compile()).encode('ascii', 'backslashreplace')
def __and__(self, other):
"""'and' at the ClauseElement level.
.. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement` level.
"""
return and_(self, other)
def __or__(self, other):
"""'or' at the ClauseElement level.
.. deprecated:: 0.9.5 - conjunctions are intended to be
           at the :class:`.ColumnElement` level.
"""
return or_(self, other)
def __invert__(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return self._negate()
def _negate(self):
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def __repr__(self):
friendly = self.description
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
class ColumnElement(operators.ColumnOperators, ClauseElement):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`.ColumnElement` is the
:class:`.Column` object, :class:`.ColumnElement` serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`.ColumnElement` is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression
level, and are intended to accept instances of :class:`.ColumnElement` as
arguments. These functions will typically document that they accept a
"SQL expression" as an argument. What this means in terms of SQLAlchemy
usually refers to an input which is either already in the form of a
:class:`.ColumnElement` object, or a value which can be **coerced** into
one. The coercion rules followed by most, but not all, SQLAlchemy Core
functions with regards to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound
value". This generally means that a :func:`.bindparam` will be
produced featuring the given value embedded into the construct; the
resulting :class:`.BindParameter` object is an instance of
:class:`.ColumnElement`. The Python value will ultimately be sent
      to the DBAPI at execution time as a parameterized argument to the
``execute()`` or ``executemany()`` methods, after SQLAlchemy
type-specific converters (e.g. those provided by any associated
:class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which
feature a method called ``__clause_element__()``. The Core
expression system looks for this method when an object of otherwise
unknown type is passed to a function that is looking to coerce the
argument into a :class:`.ColumnElement` expression. The
``__clause_element__()`` method, if present, should return a
:class:`.ColumnElement` instance. The primary use of
``__clause_element__()`` within SQLAlchemy is that of class-bound
attributes on ORM-mapped classes; a ``User`` class which contains a
mapped attribute named ``.name`` will have a method
``User.name.__clause_element__()`` which when invoked returns the
:class:`.Column` called ``name`` associated with the mapped table.
* The Python ``None`` value is typically interpreted as ``NULL``,
which in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`.ColumnElement` provides the ability to generate new
:class:`.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`.ColumnElement` instances
which are composed from other, more fundamental :class:`.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
.. seealso::
:class:`.Column`
:func:`.expression.column`
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
_label = None
_key_label = key = None
_alt_names = ()
def self_group(self, against=None):
if (against in (operators.and_, operators.or_, operators._asbool) and
self.type._type_affinity
is type_api.BOOLEANTYPE._type_affinity):
return AsBoolean(self, operators.istrue, operators.isfalse)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.isfalse, operators.istrue)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
return self.type.comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
key)
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj):
return BindParameter(None, obj,
_compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, '_proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, '_proxies'):
for c in self._proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`.ColumnElement`
has a common ancestor to this :class:`.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return hasattr(other, 'name') and hasattr(self, 'name') and \
other.name == self.name
def _make_proxy(
self, selectable, name=None, name_is_truncatable=False, **kw):
"""Create a new :class:`.ColumnElement` representing this
:class:`.ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
if self.key:
key = self.key
else:
try:
key = str(self)
except exc.UnsupportedCompilationError:
key = self.anon_label
else:
key = name
co = ColumnClause(
_as_truncated(name) if name_is_truncatable else name,
type_=getattr(self, 'type', None),
_selectable=selectable
)
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = \
selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass
the comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`~.expression.label` function.
if 'name' is None, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
the compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
return _anonymous_label(
'%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon'))
)
class BindParameter(ColumnElement):
"""Represent a "bound expression".
:class:`.BindParameter` is invoked explicitly using the
:func:`.bindparam` function, as in::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
Detailed discussion of how :class:`.BindParameter` is used is
at :func:`.bindparam`.
.. seealso::
:func:`.bindparam`
"""
__visit_name__ = 'bindparam'
_is_crud = False
def __init__(self, key, value=NO_ARG, type_=None,
unique=False, required=NO_ARG,
quote=None, callable_=None,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None):
"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
        supplied at the point at which the statement is executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the Postgresql database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
          :class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions, it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
          nor :paramref:`.bindparam.callable` was passed. If either of these
parameters are present, then :paramref:`.bindparam.required`
defaults to ``False``.
.. versionchanged:: 0.8 If the ``required`` flag is not specified,
it will be set automatically to ``True`` or ``False`` depending
on whether or not the ``value`` or ``callable`` parameters
were specified.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.name
if required is NO_ARG:
required = (value is NO_ARG and callable_ is None)
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
or 'param'))
else:
self.key = key or _anonymous_label('%%(%d param)s'
% id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
if type_ is None:
if _compared_to_type is not None:
self.type = \
_compared_to_type.coerce_compared_value(
_compared_to_operator, value)
else:
self.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
def _with_value(self, value):
"""Return a copy of this :class:`.BindParameter` with the given value
set.
"""
cloned = self._clone()
cloned.value = value
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
cloned.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
return cloned
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
"""
if self.callable:
return self.callable()
else:
return self.value
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label(
'%%(%d %s)s' % (id(self), self._orig_key or 'param'))
def compare(self, other, **kw):
"""Compare this :class:`BindParameter` to the given
clause."""
return isinstance(other, BindParameter) \
and self.type._compare_type_affinity(other.type) \
and self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d['callable'] = None
d['value'] = v
return d
def __repr__(self):
return 'BindParameter(%r, %r, type_=%r)' % (self.key,
self.value, self.type)
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`.Text` construct is produced using the :func:`.text`
function; see that function for full documentation.
.. seealso::
:func:`.text`
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
_execution_options = \
Executable._execution_options.union(
{'autocommit': PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
@property
def selectable(self):
return self
_hide_froms = []
def __init__(
self,
text,
bind=None):
self._bind = bind
self._bindparams = {}
def repl(m):
self._bindparams[m.group(1)] = BindParameter(m.group(1))
return ':%s' % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
@classmethod
def _create_text(self, text, bind=None, bindparams=None,
typemap=None, autocommit=None):
"""Construct a new :class:`.TextClause` clause, representing
a textual SQL string directly.
E.g.::
            from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`.text` provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally. The construct can also
be provided with a ``.c`` collection of column elements, allowing
it to be embedded in other SQL expression constructs as a subquery.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
For SQL statements where a colon is required verbatim, as within
an inline string, use a backslash to escape::
t = text("SELECT * FROM users WHERE name='\\:username'")
The :class:`.TextClause` construct includes methods which can
provide information about the bound parameters as well as the column
values which would be returned from the textual statement, assuming
it's an executable SELECT type of statement. The
:meth:`.TextClause.bindparams` method is used to provide bound
parameter detail, and :meth:`.TextClause.columns` method allows
specification of return columns including names and types::
t = text("SELECT * FROM users WHERE id=:user_id").\\
bindparams(user_id=7).\\
columns(id=Integer, name=String)
for id, name in connection.execute(t):
print(id, name)
The :func:`.text` construct is used internally in cases when
a literal string is specified for part of a larger query, such as
when a string is specified to the :meth:`.Select.where` method of
:class:`.Select`. In those cases, the same
bind parameter syntax is applied::
s = select([users.c.id, users.c.name]).where("id=:user_id")
result = connection.execute(s, user_id=12)
Using :func:`.text` explicitly usually implies the construction
of a full, standalone statement. As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`.text` construct that should be subject to "autocommit"
can be set explicitly so using the
:paramref:`.Connection.execution_options.autocommit` option::
t = text("EXEC my_procedural_thing()").\\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`.text` constructs implicitly - that is, statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
Deprecated. A list of :func:`.bindparam` instances used to
provide information about parameters embedded in the statement.
This argument now invokes the :meth:`.TextClause.bindparams`
method on the construct before returning it. E.g.::
stmt = text("SELECT * FROM table WHERE id=:id",
bindparams=[bindparam('id', value=5, type_=Integer)])
Is equivalent to::
stmt = text("SELECT * FROM table WHERE id=:id").\\
bindparams(bindparam('id', value=5, type_=Integer))
.. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
supersedes the ``bindparams`` argument to :func:`.text`.
:param typemap:
Deprecated. A dictionary mapping the names of columns
represented in the columns clause of a ``SELECT`` statement
to type objects,
which will be used to perform post-processing on columns within
the result set. This parameter now invokes the
:meth:`.TextClause.columns` method, which returns a
:class:`.TextAsFrom` construct that gains a ``.c`` collection and
can be embedded in other expressions. E.g.::
stmt = text("SELECT * FROM table",
typemap={'id': Integer, 'name': String},
)
Is equivalent to::
stmt = text("SELECT * FROM table").columns(id=Integer,
name=String)
Or alternatively::
from sqlalchemy.sql import column
stmt = text("SELECT * FROM table").columns(
column('id', Integer),
column('name', String)
)
.. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
supersedes the ``typemap`` argument to :func:`.text`.
"""
stmt = TextClause(text, bind=bind)
if bindparams:
stmt = stmt.bindparams(*bindparams)
if typemap:
stmt = stmt.columns(**typemap)
if autocommit is not None:
util.warn_deprecated('autocommit on text() is deprecated. '
'Use .execution_options(autocommit=True)')
stmt = stmt.execution_options(autocommit=autocommit)
return stmt
@_generative
def bindparams(self, *binds, **names_to_values):
"""Establish the values and/or types of bound parameters within
this :class:`.TextClause` construct.
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
the :meth:`.TextClause.bindparams` method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
respectively. The types will be
inferred from the values given, in this case :class:`.String` and
:class:`.DateTime`.
When specific typing behavior is needed, the positional ``*binds``
argument can be used in which to specify :func:`.bindparam` constructs
directly. These constructs must include at least the ``key``
argument, then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
Above, we specified the type of :class:`.DateTime` for the
``timestamp`` bind, and the type of :class:`.String` for the ``name``
bind. In the case of ``name`` we also set the default value of
``"jack"``.
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
The :meth:`.TextClause.bindparams` method can be called repeatedly,
where it will re-use existing :class:`.BindParameter` objects to add
new information. For example, we can call
:meth:`.TextClause.bindparams` first with typing information, and a
second time with value information, and it will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
.. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method
supersedes the argument ``bindparams`` passed to
:func:`~.expression.text`.
"""
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
try:
existing = new_params[bind.key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % bind.key)
else:
new_params[existing.key] = bind
for key, value in names_to_values.items():
try:
existing = new_params[key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % key)
else:
new_params[key] = existing._with_value(value)
@util.dependencies('sqlalchemy.sql.selectable')
def columns(self, selectable, *cols, **types):
"""Turn this :class:`.TextClause` object into a :class:`.TextAsFrom`
object that can be embedded into another statement.
This function essentially bridges the gap between an entirely
textual SELECT statement and the SQL expression language concept
of a "selectable"::
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).alias('st')
stmt = select([mytable]).\\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
Above, we used untyped :func:`.column` elements. These can also have
types specified, which will impact how the column behaves in
expressions as well as determining result set behavior::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
Keyword arguments allow just the names and types of columns to be
specified, where the :func:`.column` elements will be generated
automatically::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
The :meth:`.TextClause.columns` method provides a direct
route to calling :meth:`.FromClause.alias` as well as
:meth:`.SelectBase.cte` against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = select([sometable]).where(sometable.c.id == stmt.c.id)
.. versionadded:: 0.9.0 :func:`.text` can now be converted into a
fully featured "selectable" construct using the
:meth:`.TextClause.columns` method. This method supersedes the
``typemap`` argument to :func:`.text`.
"""
input_cols = [
ColumnClause(col.key, types.pop(col.key))
if col.key in types
else col
for col in cols
] + [ColumnClause(key, type_) for key, type_ in types.items()]
return selectable.TextAsFrom(self, input_cols)
@property
def type(self):
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self._bindparams = dict((b.key, clone(b, **kw))
for b in self._bindparams.values())
def get_children(self, **kwargs):
return list(self._bindparams.values())
def compare(self, other):
return isinstance(other, TextClause) and other.text == self.text
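# --- Editor's illustrative sketch (not part of the original module). ---
# A minimal end-to-end use of text() with bindparams() and columns(), as
# described in the docstrings above; the statement and table names are
# assumptions for demonstration only.
def _example_textclause_usage():
    from sqlalchemy import text, bindparam, String, Integer
    stmt = text("SELECT id, name FROM user WHERE name=:name")
    # first call supplies typing information, second call supplies values;
    # bindparams() merges both onto the same BindParameter objects
    stmt = stmt.bindparams(bindparam('name', type_=String))
    stmt = stmt.bindparams(name='jack')
    # columns() turns the bare text into a selectable with typed columns
    return stmt.columns(id=Integer, name=String)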
class Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
:class:`.Null` is accessed as a constant via the
:func:`.null` function.
"""
__visit_name__ = 'null'
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@classmethod
def _singleton(cls):
"""Return a constant :class:`.Null` construct."""
return NULL
def compare(self, other):
return isinstance(other, Null)
class False_(ColumnElement):
"""Represent the ``false`` keyword, or equivalent, in a SQL statement.
:class:`.False_` is accessed as a constant via the
:func:`.false` function.
"""
__visit_name__ = 'false'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return TRUE
@classmethod
def _singleton(cls):
"""Return a constant :class:`.False_` construct.
E.g.::
>>> from sqlalchemy import false
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE false
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE 0 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.true`
"""
return FALSE
def compare(self, other):
return isinstance(other, False_)
class True_(ColumnElement):
"""Represent the ``true`` keyword, or equivalent, in a SQL statement.
:class:`.True_` is accessed as a constant via the
:func:`.true` function.
"""
__visit_name__ = 'true'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return FALSE
@classmethod
def _ifnone(cls, other):
if other is None:
return cls._singleton()
else:
return other
@classmethod
def _singleton(cls):
"""Return a constant :class:`.True_` construct.
E.g.::
>>> from sqlalchemy import true
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE true
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE 1 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.false`
"""
return TRUE
def compare(self, other):
return isinstance(other, True_)
NULL = Null()
FALSE = False_()
TRUE = True_()
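# --- Editor's illustrative sketch (not part of the original module). ---
# The null()/true()/false() constructors produce Null/True_/False_
# constructs; compare() is the supported equality check, since it tests
# by type rather than by identity.
def _example_constant_singletons():
    from sqlalchemy import null, true, false
    assert null().compare(NULL)
    assert true().compare(TRUE)
    assert false().compare(FALSE)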
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
By default, it is comma-separated, as in a column listing.
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
if self.group_contents:
self.clauses = [
_literal_as_text(clause).self_group(against=self.operator)
for clause in clauses]
else:
self.clauses = [
_literal_as_text(clause)
for clause in clauses]
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
if self.group_contents:
self.clauses.append(_literal_as_text(clause).
self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and \
len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *arg, **kw):
raise NotImplementedError(
"BooleanClauseList has a private constructor")
@classmethod
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
convert_clauses = []
clauses = util.coerce_generator_arg(clauses)
for clause in clauses:
clause = _literal_as_text(clause)
if isinstance(clause, continue_on):
continue
elif isinstance(clause, skip_on):
return clause.self_group(against=operators._asbool)
convert_clauses.append(clause)
if len(convert_clauses) == 1:
return convert_clauses[0].self_group(against=operators._asbool)
elif not convert_clauses and clauses:
return clauses[0].self_group(against=operators._asbool)
convert_clauses = [c.self_group(against=operator)
for c in convert_clauses]
self = cls.__new__(cls)
self.clauses = convert_clauses
self.group = True
self.operator = operator
self.group_contents = True
self.type = type_api.BOOLEANTYPE
return self
@classmethod
def and_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``AND``.
E.g.::
from sqlalchemy import and_
stmt = select([users_table]).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`.Select.where` method for example can be invoked multiple
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select([users_table]).\\
where(users_table.c.name == 'wendy').\\
where(users_table.c.enrolled == True)
.. seealso::
:func:`.or_`
"""
return cls._construct(operators.and_, True_, False_, *clauses)
@classmethod
def or_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``OR``.
E.g.::
from sqlalchemy import or_
stmt = select([users_table]).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
The :func:`.or_` disjunction is also available using the
Python ``|`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
.. seealso::
:func:`.and_`
"""
return cls._construct(operators.or_, False_, True_, *clauses)
@property
def _select_iterable(self):
return (self, )
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
def _negate(self):
return ClauseList._negate(self)
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
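# --- Editor's illustrative sketch (not part of the original module). ---
# How _construct()'s continue_on/skip_on arguments play out: for and_(),
# true() elements are dropped and a false() element short-circuits the
# whole conjunction (and symmetrically for or_()).
def _example_boolean_shortcircuit():
    from sqlalchemy import and_, or_, true, false, column
    x = column('x')
    print(and_(x > 5, true()))    # renders like: x > :x_1
    print(and_(x > 5, false()))   # renders like: false (or 0 = 1)
    print(or_(x > 5, true()))     # renders like: true (or 1 = 1)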
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
def __init__(self, *clauses, **kw):
"""Return a :class:`.Tuple`.
Main usage is to produce a composite IN construct::
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. warning::
The composite IN construct is not supported by all backends,
and is currently known to work on Postgresql and MySQL,
but not SQLite. Unsupported backends will raise
a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
an expression is invoked.
"""
clauses = [_literal_as_binds(c) for c in clauses]
self._type_tuple = [arg.type for arg in clauses]
self.type = kw.pop('type_', self._type_tuple[0]
if self._type_tuple else type_api.NULLTYPE)
super(Tuple, self).__init__(*clauses, **kw)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, operator, obj):
return Tuple(*[
BindParameter(None, o, _compared_to_operator=operator,
_compared_to_type=type_, unique=True)
for o, type_ in zip(obj, self._type_tuple)
]).self_group()
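# --- Editor's illustrative sketch (not part of the original module). ---
# Composite IN via tuple_(); each candidate tuple is expanded by
# _bind_param() above into per-element bind parameters typed from the
# corresponding column.
def _example_tuple_in():
    from sqlalchemy import tuple_, column
    return tuple_(column('col1'), column('col2')).in_(
        [(1, 2), (5, 12), (10, 19)])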
class Case(ColumnElement):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
Details on :class:`.Case` usage are at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
"""Produce a ``CASE`` expression.
The ``CASE`` construct in SQL is a conditional object that
acts somewhat analogously to an "if/then" construct in other
languages. It returns an instance of :class:`.Case`.
:func:`.case` in its usual form is passed a list of "when"
constructs, that is, a list of conditions and results as tuples::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
The above statement will produce SQL resembling::
SELECT id, name FROM user
WHERE CASE
WHEN (name = :name_1) THEN :param_1
WHEN (name = :name_2) THEN :param_2
ELSE :param_3
END
When simple equality expressions of several values against a single
parent column are needed, :func:`.case` also has a "shorthand" format
used via the
:paramref:`.case.value` parameter, which is passed a column
expression to be compared. In this form, the :paramref:`.case.whens`
parameter is passed as a dictionary containing expressions to be
compared against keyed to result expressions. The statement below is
equivalent to the preceding statement::
stmt = select([users_table]).\\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
coerced from Python literals into :func:`.bindparam` constructs.
SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted
as well. To coerce a literal string expression into a constant
expression rendered inline, use the :func:`.literal_column` construct,
as in::
from sqlalchemy import case, literal_column
case(
[
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
)
],
else_=literal_column("'lessthan10'")
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
ELSE 'lessthan10'
END
:param whens: The criteria to be compared against.
:paramref:`.case.whens` accepts two different forms, based on
whether or not :paramref:`.case.value` is used.
In the first form, it accepts a list of 2-tuples; each 2-tuple
consists of ``(<sql expression>, <value>)``, where the SQL
expression is a boolean expression and "value" is a resulting value,
e.g.::
case([
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
])
In the second form, it accepts a Python dictionary of comparison
values mapped to a resulting value; this form requires
:paramref:`.case.value` to be present, and values will be compared
using the ``==`` operator, e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
:param value: An optional SQL expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
passed to :paramref:`.case.whens`.
:param else\_: An optional SQL expression which will be the evaluated
result of the ``CASE`` construct if all expressions within
:paramref:`.case.whens` evaluate to false. When omitted, most
databases will produce a result of NULL if none of the "when"
expressions evaluate to true.
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw))
for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in
self.get_children()]))
def literal_column(text, type_=None):
"""Produce a :class:`.ColumnClause` object that has the
:paramref:`.column.is_literal` flag set to True.
:func:`.literal_column` is similar to :func:`.column`, except that
it is more often used as a "standalone" column expression that renders
exactly as stated; while :func:`.column` stores a string name that
will be assumed to be part of a table and may be quoted as such,
:func:`.literal_column` can be that, or any other arbitrary column-oriented
expression.
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
.. seealso::
:func:`.column`
:func:`.text`
:ref:`sqlexpression_literal_column`
"""
return ColumnClause(text, type_=type_, is_literal=True)
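# --- Editor's illustrative sketch (not part of the original module). ---
# column() vs. literal_column(): the former is treated as a (quotable)
# column name, the latter renders its text exactly as given, e.g. for an
# inline string constant.
def _example_literal_column():
    from sqlalchemy import select, column, literal_column
    return select([column('id'), literal_column("'fixed'").label('tag')])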
class Cast(ColumnElement):
"""Represent a ``CAST`` expression.
:class:`.Cast` is produced using the :func:`.cast` factory function,
as in::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
Details on :class:`.Cast` usage are at :func:`.cast`.
.. seealso::
:func:`.cast`
"""
__visit_name__ = 'cast'
def __init__(self, expression, type_):
"""Produce a ``CAST`` expression.
:func:`.cast` returns an instance of :class:`.Cast`.
E.g.::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
The above statement will produce SQL resembling::
SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
The :func:`.cast` function performs two distinct functions when
used. The first is that it renders the ``CAST`` expression within
the resulting SQL string. The second is that it associates the given
type (e.g. :class:`.TypeEngine` class or instance) with the column
expression on the Python side, which means the expression will take
on the expression operator behavior associated with that type,
as well as the bound-value handling and result-row-handling behavior
of the type.
.. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
to the expression such that it takes effect on the bound-value,
e.g. the Python-to-database direction, in addition to the
result handling, e.g. database-to-Python, direction.
An alternative to :func:`.cast` is the :func:`.type_coerce` function.
This function performs the second task of associating an expression
with a specific type, but does not render the ``CAST`` expression
in SQL.
:param expression: A SQL expression, such as a :class:`.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the ``CAST`` should apply.
.. seealso::
:func:`.type_coerce` - Python-side type coercion without emitting
CAST.
"""
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
self.typeclause = TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
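# --- Editor's illustrative sketch (not part of the original module). ---
# cast() vs. type_coerce(), per the docstring above: both associate the
# type on the Python side, but only cast() emits CAST in the SQL.
def _example_cast_vs_type_coerce():
    from sqlalchemy import cast, type_coerce, Numeric, column
    price = column('unit_price')
    with_cast = cast(price, Numeric(10, 4))       # CAST(unit_price AS NUMERIC(10, 4))
    no_cast = type_coerce(price, Numeric(10, 4))  # renders as plain unit_price
    return with_cast, no_cast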
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
"""Return a :class:`.Extract` construct.
This is typically available as :func:`.extract`
as well as ``func.extract`` from the
:data:`.func` namespace.
"""
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
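# --- Editor's illustrative sketch (not part of the original module). ---
# A simple extract() call; the field name is passed through as-is.
def _example_extract():
    from sqlalchemy import extract, column
    return extract('year', column('timestamp'))  # EXTRACT(year FROM timestamp)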
class UnaryExpression(ColumnElement):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
:class:`.UnaryExpression` is the basis for several unary operators
including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
:func:`.nullsfirst` and :func:`.nullslast`.
"""
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None,
type_=None, negate=None):
self.operator = operator
self.modifier = modifier
self.element = element.self_group(
against=self.operator or self.modifier)
self.type = type_api.to_instance(type_)
self.negate = negate
@classmethod
def _create_nullsfirst(cls, column):
"""Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.
:func:`.nullsfirst` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullsfirst
stmt = select([users_table]).\\
order_by(nullsfirst(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
invoked from the column expression itself using
:meth:`.ColumnElement.nullsfirst`, rather than as its standalone
function version, as in::
stmt = (select([users_table]).
order_by(users_table.c.name.desc().nullsfirst())
)
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.nullsfirst_op)
@classmethod
def _create_nullslast(cls, column):
"""Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.
:func:`.nullslast` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullslast
stmt = select([users_table]).\\
order_by(nullslast(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS LAST
Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
invoked from the column expression itself using
:meth:`.ColumnElement.nullslast`, rather than as its standalone
function version, as in::
stmt = select([users_table]).\\
order_by(users_table.c.name.desc().nullslast())
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullsfirst`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.nullslast_op)
@classmethod
def _create_desc(cls, column):
"""Produce a descending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import desc
stmt = select([users_table]).order_by(desc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name DESC
The :func:`.desc` function is a standalone version of the
:meth:`.ColumnElement.desc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.desc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.desc` operation.
.. seealso::
:func:`.asc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.desc_op)
@classmethod
def _create_asc(cls, column):
"""Produce an ascending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import asc
stmt = select([users_table]).order_by(asc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name ASC
The :func:`.asc` function is a standalone version of the
:meth:`.ColumnElement.asc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.asc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.asc` operation.
.. seealso::
:func:`.desc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.asc_op)
@classmethod
def _create_distinct(cls, expr):
"""Produce an column-expression-level unary ``DISTINCT`` clause.
This applies the ``DISTINCT`` keyword to an individual column
expression, and is typically contained within an aggregate function,
as in::
from sqlalchemy import distinct, func
stmt = select([func.count(distinct(users_table.c.name))])
The above would produce an expression resembling::
SELECT COUNT(DISTINCT name) FROM user
The :func:`.distinct` function is also available as a column-level
method, e.g. :meth:`.ColumnElement.distinct`, as in::
stmt = select([func.count(users_table.c.name.distinct())])
The :func:`.distinct` operator is different from the
:meth:`.Select.distinct` method of :class:`.Select`,
which produces a ``SELECT`` statement
with ``DISTINCT`` applied to the result set as a whole,
e.g. a ``SELECT DISTINCT`` expression. See that method for further
information.
.. seealso::
:meth:`.ColumnElement.distinct`
:meth:`.Select.distinct`
:data:`.func`
"""
expr = _literal_as_binds(expr)
return UnaryExpression(
expr, operator=operators.distinct_op, type_=expr.type)
@util.memoized_property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`UnaryExpression` against the given
:class:`.ClauseElement`."""
return (
isinstance(other, UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type)
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
self.operator = operator
self.negate = negate
self.modifier = None
def self_group(self, against=None):
return self
def _negate(self):
return self.element._negate()
class BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
"""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None,
negate=None, modifiers=None):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, util.string_types):
operator = operators.custom_op(operator)
self._orig = (left, right)
self.left = left.self_group(against=operator)
self.right = right.self_group(against=operator)
self.operator = operator
self.type = type_api.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
if self.operator in (operator.eq, operator.ne):
return self.operator(hash(self._orig[0]), hash(self._orig[1]))
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`BinaryExpression` against the
given :class:`BinaryExpression`."""
return (
isinstance(other, BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=type_api.BOOLEANTYPE,
modifiers=self.modifiers)
else:
return super(BinaryExpression, self)._negate()
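# --- Editor's illustrative sketch (not part of the original module). ---
# BinaryExpression objects are produced by Python operators on column
# expressions; ~ invokes _negate(), which swaps in the negate operator.
def _example_binary_expression():
    from sqlalchemy.sql import column
    expr = column('a') == column('b')
    print(expr)     # renders like: a = b
    print(~expr)    # renders like: a != b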
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', type_api.NULLTYPE)
def self_group(self, against=None):
return self
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element': self.element, 'type': self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
def compare(self, other, **kw):
return isinstance(other, Grouping) and \
self.element.compare(other.element)
class Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = 'over'
order_by = None
partition_by = None
def __init__(self, func, partition_by=None, order_by=None):
"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
E.g.::
from sqlalchemy import over
over(func.row_number(), order_by='x')
Would produce "ROW_NUMBER() OVER(ORDER BY x)".
:param func: a :class:`.FunctionElement` construct, typically
generated by :data:`~.expression.func`.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. versionadded:: 0.7
"""
self.func = func
if order_by is not None:
self.order_by = ClauseList(*util.to_list(order_by))
if partition_by is not None:
self.partition_by = ClauseList(*util.to_list(partition_by))
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
))
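# --- Editor's illustrative sketch (not part of the original module). ---
# over() against a window function; partition_by/order_by accept column
# elements or plain strings, per the docstring above.
def _example_over():
    from sqlalchemy import over, func
    # renders like: ROW_NUMBER() OVER (PARTITION BY dept ORDER BY x)
    return over(func.row_number(), partition_by='dept', order_by='x')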
class Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
"""Return a :class:`Label` object for the
given :class:`.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:meth:`.ColumnElement.label` method on :class:`.ColumnElement`.
:param name: label name
:param element: a :class:`.ColumnElement`.
"""
while isinstance(element, Label):
element = element.element
if name:
self.name = name
else:
self.name = _anonymous_label(
'%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon'))
)
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
def __reduce__(self):
return self.__class__, (self.name, self._element, self._type)
@util.memoized_property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
sub_element = self._element.self_group(against=against)
if sub_element is not self._element:
return Label(self.name,
sub_element,
type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
e = self.element._make_proxy(selectable,
name=name if name else self.name)
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return e
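# --- Editor's illustrative sketch (not part of the original module). ---
# Labels are normally created through ColumnElement.label(), which is the
# convenient route into the Label constructor above.
def _example_label():
    from sqlalchemy.sql import column
    return column('name').label('user_name')  # renders like: name AS user_name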
class ColumnClause(Immutable, ColumnElement):
"""Represents a column expression from any textual string.
The :class:`.ColumnClause`, a lightweight analogue to the
:class:`.Column` class, is typically invoked using the
:func:`.column` function, as in::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
:class:`.ColumnClause` is the immediate superclass of the schema-specific
:class:`.Column` object. While the :class:`.Column` class has all the
same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
class is usable by itself in those cases where behavioral requirements
are limited to simple SQL expression generation. The object has none of
the associations with schema-level metadata or with execution-time
behavior that :class:`.Column` does, so in that sense is a "lightweight"
version of :class:`.Column`.
Full details on :class:`.ColumnClause` usage are at :func:`.column`.
.. seealso::
:func:`.column`
:class:`.Column`
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, type_=None, is_literal=False, _selectable=None):
"""Produce a :class:`.ColumnClause` object.
The :class:`.ColumnClause` is a lightweight analogue to the
:class:`.Column` class. The :func:`.column` function can
be invoked with just a name alone, as in::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
Once constructed, :func:`.column` may be used like any other SQL
expression element such as within :func:`.select` constructs::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The text handled by :func:`.column` is assumed to be handled
like the name of a database column; if the string contains mixed case,
special characters, or matches a known reserved word on the target
backend, the column expression will render using the quoting
behavior determined by the backend. To produce a textual SQL
expression that is rendered exactly without any quoting,
use :func:`.literal_column` instead, or pass ``True`` as the
value of :paramref:`.column.is_literal`. Additionally, full SQL
statements are best handled using the :func:`.text` construct.
:func:`.column` can be used in a table-like
fashion by combining it with the :func:`.table` function
(which is the lightweight analogue to :class:`.Table`) to produce
a working table construct with minimal boilerplate::
from sqlalchemy.sql import table, column
user = table("user",
column("id"),
column("name"),
column("description"),
)
stmt = select([user.c.description]).where(user.c.name == 'wendy')
A :func:`.column` / :func:`.table` construct like that illustrated
above can be created in an
ad-hoc fashion and is not associated with any
:class:`.schema.MetaData`, DDL, or events, unlike its
:class:`.Table` counterpart.
:param text: the text of the element.
:param type: :class:`.types.TypeEngine` object which can associate
this :class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
quoting rules applied regardless of case-sensitive settings. The
:func:`.literal_column()` function essentially invokes
:func:`.column` while passing ``is_literal=True``.
.. seealso::
:class:`.Column`
:func:`.literal_column`
:func:`.table`
:func:`.text`
:ref:`sqlexpression_literal_column`
"""
self.key = self.name = text
self.table = _selectable
self.type = type_api.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if self.is_literal or \
self.table is None or self.table._textual or \
not hasattr(other, 'proxy_set') or (
isinstance(other, ColumnClause) and
(other.is_literal or
other.table is None or
other.table._textual)
):
return (hasattr(other, 'name') and self.name == other.name) or \
(hasattr(other, '_label') and self._label == other._label)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__['table']
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__['table'] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode('ascii', 'backslashreplace')
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
def _gen_label(self, name):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, 'schema', None):
label = t.schema.replace('.', '_') + "_" + \
t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
if isinstance(label, quoted_name):
label.quote = name.quote
else:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
# can't get this situation to occur, so let's
# assert false on it for now
assert not isinstance(label, quoted_name)
label = quoted_name(label, t.name.quote)
# ensure the label name doesn't conflict with that
# of an existing column
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
def _bind_param(self, operator, obj):
return BindParameter(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True,
name_is_truncatable=False, **kw):
# propagate the "is_literal" flag only if we are keeping our name,
# otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = self._constructor(
_as_truncated(name or self.name) if
name_is_truncatable else
(name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal
)
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = \
selectable._is_clone_of.columns.get(c.key)
if attach:
selectable._columns[c.key] = c
return c
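# --- Editor's illustrative sketch (not part of the original module). ---
# When a ColumnClause is attached to a named table, _gen_label() above
# produces a "<table>_<column>" label used for result-set targeting
# (_label is a private attribute, shown here only for illustration).
def _example_generated_label():
    from sqlalchemy.sql import table, column
    user = table('user', column('id'))
    print(user.c.id._label)  # expected: user_id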
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = \
Executable._execution_options.union({'autocommit': False})
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
class quoted_name(util.text_type):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as
:class:`.Table`, :class:`.Column`, and others. The class can also be
passed explicitly as the name to any function that receives a name which
can be quoted, for example to use the :meth:`.Engine.has_table` method with
an unconditionally quoted name::
from sqlalchemy import create_engine
from sqlalchemy.sql.elements import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
The above will run the "has table" check against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
"""
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (util.text_type(self), self.quote)
@util.memoized_instancemethod
def lower(self):
if self.quote:
return self
else:
return util.text_type(self).lower()
@util.memoized_instancemethod
def upper(self):
if self.quote:
return self
else:
return util.text_type(self).upper()
def __repr__(self):
backslashed = self.encode('ascii', 'backslashreplace')
if not util.py2k:
backslashed = backslashed.decode('ascii')
return "'%s'" % backslashed
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
# return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (util.text_type(self), self.quote)
def apply_map(self, map_):
return self
class conv(_truncated_label):
"""Mark a string indicating that a name has already been converted
by a naming convention.
This is a string subclass that indicates a name that should not be
subject to any further naming conventions.
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
The name of the above constraint will be rendered as ``"ck_t_x5"``.
That is, the existing name ``x5`` is used in the naming convention as the
``constraint_name`` token.
In some situations, such as in migration scripts, we may be rendering
the above :class:`.CheckConstraint` with a name that's already been
converted. In order to make sure the name isn't double-modified, the
new name is applied using the :func:`.schema.conv` marker. We can
use this explicitly as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
Where above, the :func:`.schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not
``"ck_t_ck_t_x5"``
.. versionadded:: 0.9.4
.. seealso::
:ref:`constraint_naming_conventions`
"""
class _defer_name(_truncated_label):
"""mark a name as 'deferred' for the purposes of automated name
generation.
"""
def __new__(cls, value):
if value is None:
return _NONE_NAME
elif isinstance(value, conv):
return value
else:
return super(_defer_name, cls).__new__(cls, value)
def __reduce__(self):
return self.__class__, (util.text_type(self), )
class _defer_none_name(_defer_name):
"""indicate a 'deferred' name that was ultimately the value None."""
_NONE_NAME = _defer_none_name("_unnamed_")
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
def __add__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(self, util.text_type(other)),
self.quote)
)
def __radd__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(util.text_type(other), self),
self.quote)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, util.string_types):
return element
else:
try:
return str(element)
except:
return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if not all_overlap.intersection(elem._cloned_set))
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
"""True if ``col`` is an instance of :class:`.ColumnElement`."""
return isinstance(col, ColumnElement)
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {'column': cols.add})
return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). It is only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
if isinstance(element, util.string_types):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
try:
return element.key
except AttributeError:
return None
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_text(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif isinstance(element, util.string_types):
return TextClause(util.text_type(element))
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object or string expected."
)
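# --- Editor's illustrative sketch (not part of the original module). ---
# The coercion helpers route plain Python values into the appropriate
# constructs: strings become TextClause, None becomes Null, and ordinary
# values become BindParameter via _literal_as_binds() below.
def _example_literal_coercion():
    assert isinstance(_literal_as_text("x = 1"), TextClause)
    assert isinstance(_literal_as_text(None), Null)
    assert isinstance(_literal_as_binds(5), BindParameter)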
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element)))
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return Null()
else:
return BindParameter(name, element, type_=type_, unique=True)
else:
return element
def _interpret_as_column_or_from(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
elif hasattr(insp, "selectable"):
return insp.selectable
return ColumnClause(str(element), is_literal=True)
def _const_expr(element):
if isinstance(element, (Null, False_, True_)):
return element
elif element is None:
return Null()
elif element is False:
return False_()
elif element is True:
return True_()
else:
raise exc.ArgumentError(
"Expected None, False, or True"
)
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),
fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
ColumnElement.comparator._reset(self)
for attr in ('name', 'key', 'table'):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
ColumnElement.comparator._reset(clone)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
@util.memoized_property
def anon_label(self):
return self._Annotated__element.anon_label
| grepme/CMPUT410Lab01 | virt_env/virt1/lib/python2.7/site-packages/SQLAlchemy-0.9.8-py2.7-linux-x86_64.egg/sqlalchemy/sql/elements.py | Python | apache-2.0 | 121469 | ["VisIt"] | 7098e0256c327468f0734ecc0cc0b748372066c76de9d78680f01e92d5291c66 |
# Authors: Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
# Many of the computations in this code were derived from Matti Hämäläinen's
# C code.
from copy import deepcopy
from functools import partial
import os
import os.path as op
import numpy as np
from .io.constants import FIFF
from .io.meas_info import create_info, Info
from .io.tree import dir_tree_find
from .io.tag import find_tag, read_tag
from .io.open import fiff_open
from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
from .io.pick import channel_type, _picks_to_idx
from .bem import read_bem_surfaces
from .fixes import _get_img_fdata
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
_tessellate_sphere_surf, _get_surf_neighbors,
_normalize_vectors, _triangle_neighbors, mesh_dist,
complete_surface_info, _compute_nearest, fast_cross_3d,
_CheckInside)
# keep get_mni_fiducials here just for easy backward compat
from ._freesurfer import (_get_mri_info_data, _get_atlas_values, # noqa: F401
read_freesurfer_lut, get_mni_fiducials, _check_mri)
from .utils import (get_subjects_dir, check_fname, logger, verbose, fill_doc,
_ensure_int, check_version, _get_call_line, warn,
_check_fname, _path_like, _check_sphere,
_validate_type, _check_option, _is_numeric, _pl, _suggest,
object_size, sizeof_fmt)
from .parallel import parallel_func, check_n_jobs
from .transforms import (invert_transform, apply_trans, _print_coord_trans,
combine_transforms, _get_trans,
_coord_frame_name, Transform, _str_to_frame,
_ensure_trans)
_src_kind_dict = {
'vol': 'volume',
'surf': 'surface',
'discrete': 'discrete',
}
class SourceSpaces(list):
"""Represent a list of source space.
Currently implemented as a list of dictionaries containing the source
space information
Parameters
----------
source_spaces : list
A list of dictionaries containing the source space information.
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
Attributes
----------
info : dict
Dictionary with information about the creation of the source space
file. Has keys 'working_dir' and 'command_line'.
"""
def __init__(self, source_spaces, info=None): # noqa: D102
# First check that the types form a valid config
_validate_type(source_spaces, list, 'source_spaces')
super(SourceSpaces, self).__init__(source_spaces) # list
self.kind # will raise an error if there is a problem
if info is None:
self.info = dict()
else:
self.info = dict(info)
@property
def kind(self):
types = list()
for si, s in enumerate(self):
_validate_type(s, dict, 'source_spaces[%d]' % (si,))
types.append(s.get('type', None))
_check_option('source_spaces[%d]["type"]' % (si,),
types[-1], ('surf', 'discrete', 'vol'))
if all(k == 'surf' for k in types[:2]):
surf_check = 2
if len(types) == 2:
kind = 'surface'
else:
kind = 'mixed'
else:
surf_check = 0
if all(k == 'discrete' for k in types):
kind = 'discrete'
else:
kind = 'volume'
if any(k == 'surf' for k in types[surf_check:]):
raise RuntimeError('Invalid source space with kinds %s' % (types,))
return kind
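    # Editor's note (illustrative, not in the original source): with the
    # checks above, kind resolves as, e.g.:
    #   ['surf', 'surf']            -> 'surface'
    #   ['surf', 'surf', 'vol']     -> 'mixed'
    #   ['discrete', 'discrete']    -> 'discrete'
    #   ['vol', 'discrete']         -> 'volume'
    # while a 'surf' entry anywhere else raises RuntimeError.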
@verbose
def plot(self, head=False, brain=None, skull=None, subjects_dir=None,
trans=None, verbose=None):
"""Plot the source space.
Parameters
----------
head : bool
If True, show head surface.
brain : bool | str
If True, show the brain surfaces. Can also be a str for
surface type (e.g., 'pial', same as True). Default is None,
which means 'white' for surface source spaces and False otherwise.
skull : bool | str | list of str | list of dict | None
Whether to plot skull surface. If string, common choices would be
'inner_skull', or 'outer_skull'. Can also be a list to plot
multiple skull surfaces. If a list of dicts, each dict must
contain the complete surface info (such as you get from
:func:`mne.make_bem_model`). True is an alias of 'outer_skull'.
The subject's bem and bem/flash folders are searched for the 'surf'
files. Defaults to None, which is False for surface source spaces,
and True otherwise.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
trans : str | 'auto' | dict | None
The full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration. If trans is None, an identity
matrix is assumed. This is only needed when the source space is in
head coordinates.
%(verbose_meth)s
Returns
-------
fig : instance of mayavi.mlab.Figure
The figure.
"""
from .viz import plot_alignment
surfaces = list()
bem = None
if brain is None:
brain = 'white' if any(ss['type'] == 'surf'
for ss in self) else False
if isinstance(brain, str):
surfaces.append(brain)
elif brain:
surfaces.append('brain')
if skull is None:
skull = False if self.kind == 'surface' else True
if isinstance(skull, str):
surfaces.append(skull)
elif skull is True:
surfaces.append('outer_skull')
elif skull is not False: # list
if isinstance(skull[0], dict): # bem
skull_map = {FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner_skull',
FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer_skull',
FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer_skin'}
for this_skull in skull:
surfaces.append(skull_map[this_skull['id']])
bem = skull
else: # list of str
for surf in skull:
surfaces.append(surf)
if head:
surfaces.append('head')
if self[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
coord_frame = 'head'
if trans is None:
raise ValueError('Source space is in head coordinates, but no '
'head<->MRI transform was given. Please '
'specify the full path to the appropriate '
'*-trans.fif file as the "trans" parameter.')
else:
coord_frame = 'mri'
info = create_info(0, 1000., 'eeg')
return plot_alignment(
info, trans=trans, subject=self._subject,
subjects_dir=subjects_dir, surfaces=surfaces,
coord_frame=coord_frame, meg=(), eeg=False, dig=False, ecog=False,
bem=bem, src=self
)
def __getitem__(self, *args, **kwargs):
"""Get an item."""
out = super().__getitem__(*args, **kwargs)
if isinstance(out, list):
out = SourceSpaces(out)
return out
def __repr__(self): # noqa: D105
ss_repr = []
extra = []
for si, ss in enumerate(self):
ss_type = ss['type']
r = _src_kind_dict[ss_type]
if ss_type == 'vol':
if 'seg_name' in ss:
r += " (%s)" % (ss['seg_name'],)
else:
r += ", shape=%s" % (ss['shape'],)
elif ss_type == 'surf':
r += (" (%s), n_vertices=%i" % (_get_hemi(ss)[0], ss['np']))
r += ', n_used=%i' % (ss['nuse'],)
if si == 0:
extra += ['%s coords'
% (_coord_frame_name(int(ss['coord_frame'])))]
ss_repr.append('<%s>' % r)
subj = self._subject
if subj is not None:
extra += ['subject %r' % (subj,)]
sz = object_size(self)
if sz is not None:
extra += [f'~{sizeof_fmt(sz)}']
return "<SourceSpaces: [%s] %s>" % (
', '.join(ss_repr), ', '.join(extra))
@property
def _subject(self):
return self[0].get('subject_his_id', None)
def __add__(self, other):
"""Combine source spaces."""
out = self.copy()
out += other
return SourceSpaces(out)
def copy(self):
"""Make a copy of the source spaces.
Returns
-------
src : instance of SourceSpaces
The copied source spaces.
"""
return deepcopy(self)
def __deepcopy__(self, memodict):
"""Make a deepcopy."""
# don't copy read-only views (saves a ton of mem for split-vol src)
info = deepcopy(self.info, memodict)
ss = list()
for s in self:
for key in ('rr', 'nn'):
if key in s:
arr = s[key]
id_ = id(arr)
if id_ not in memodict:
if not arr.flags.writeable:
memodict[id_] = arr
ss.append(deepcopy(s, memodict))
return SourceSpaces(ss, info)
@verbose
def save(self, fname, overwrite=False, *, verbose=None):
"""Save the source spaces to a fif file.
Parameters
----------
fname : str
File to write.
%(overwrite)s
%(verbose_meth)s
"""
write_source_spaces(fname, self, overwrite)
@verbose
def export_volume(self, fname, include_surfaces=True,
include_discrete=True, dest='mri', trans=None,
mri_resolution=False, use_lut=True, overwrite=False,
verbose=None):
"""Export source spaces to nifti or mgz file.
Parameters
----------
fname : str
Name of nifti or mgz file to write.
include_surfaces : bool
If True, include surface source spaces.
include_discrete : bool
If True, include discrete source spaces.
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of the
original T1 image. If 'surf' the coordinate system of the
FreeSurfer surface is used (Surface RAS).
trans : dict, str, or None
Either a transformation filename (usually made using mne_analyze)
or an info dict (usually opened using read_trans()). If string, an
ending of ``.fif`` or ``.fif.gz`` will be assumed to be in FIF
            format; any other ending will be assumed to be a text file with a
            4x4 transformation matrix (like the ``--trans`` MNE-C option).
Must be provided if source spaces are in head coordinates and
include_surfaces and mri_resolution are True.
mri_resolution : bool | str
If True, the image is saved in MRI resolution
(e.g. 256 x 256 x 256), and each source region (surface or
            segmentation volume) is filled in completely. If "sparse", only a
single voxel in the high-resolution MRI is filled in for each
source point.
.. versionchanged:: 0.21.0
Support for "sparse" was added.
use_lut : bool
If True, assigns a numeric value to each source space that
            corresponds to a color on the FreeSurfer lookup table.
%(overwrite)s
.. versionadded:: 0.19
%(verbose_meth)s
Notes
-----
This method requires nibabel.
"""
_check_fname(fname, overwrite)
_validate_type(mri_resolution, (bool, str), 'mri_resolution')
if isinstance(mri_resolution, str):
_check_option('mri_resolution', mri_resolution, ["sparse"],
extra='when mri_resolution is a string')
else:
mri_resolution = bool(mri_resolution)
fname = str(fname)
# import nibabel or raise error
try:
import nibabel as nib
except ImportError:
raise ImportError('This function requires nibabel.')
# Check coordinate frames of each source space
coord_frames = np.array([s['coord_frame'] for s in self])
# Raise error if trans is not provided when head coordinates are used
# and mri_resolution and include_surfaces are true
if (coord_frames == FIFF.FIFFV_COORD_HEAD).all():
coords = 'head' # all sources in head coordinates
if mri_resolution and include_surfaces:
if trans is None:
raise ValueError('trans containing mri to head transform '
'must be provided if mri_resolution and '
'include_surfaces are true and surfaces '
'are in head coordinates')
elif trans is not None:
logger.info('trans is not needed and will not be used unless '
'include_surfaces and mri_resolution are True.')
elif (coord_frames == FIFF.FIFFV_COORD_MRI).all():
coords = 'mri' # all sources in mri coordinates
if trans is not None:
logger.info('trans is not needed and will not be used unless '
'sources are in head coordinates.')
# Raise error if all sources are not in the same space, or sources are
# not in mri or head coordinates
else:
raise ValueError('All sources must be in head coordinates or all '
'sources must be in mri coordinates.')
# use lookup table to assign values to source spaces
logger.info('Reading FreeSurfer lookup table')
# read the lookup table
lut, _ = read_freesurfer_lut()
# Setup a dictionary of source types
src_types = dict(volume=[], surface_discrete=[])
# Populate dictionary of source types
for src in self:
# volume sources
if src['type'] == 'vol':
src_types['volume'].append(src)
# surface and discrete sources
elif src['type'] in ('surf', 'discrete'):
src_types['surface_discrete'].append(src)
else:
raise ValueError('Unrecognized source type: %s.' % src['type'])
# Raise error if there are no volume source spaces
if len(src_types['volume']) == 0:
raise ValueError('Source spaces must contain at least one volume.')
# Get shape, inuse array and interpolation matrix from volume sources
src = src_types['volume'][0]
aseg_data = None
if mri_resolution:
# read the mri file used to generate volumes
if mri_resolution is True:
aseg_data = _get_img_fdata(nib.load(src['mri_file']))
# get the voxel space shape
shape3d = (src['mri_width'], src['mri_depth'],
src['mri_height'])
else:
# get the volume source space shape
# read the shape in reverse order
# (otherwise results are scrambled)
shape3d = src['shape']
# calculate affine transform for image (MRI_VOXEL to RAS)
if mri_resolution:
# MRI_VOXEL to MRI transform
transform = src['vox_mri_t']
else:
# MRI_VOXEL to MRI transform
# NOTE: 'src' indicates downsampled version of MRI_VOXEL
transform = src['src_mri_t']
        # Figure out how to get from our input source space to output voxels
        fro_dst_t = invert_transform(transform)
        dest_frame = transform['to']
        if coords == 'head':
            head_mri_t = _get_trans(trans, 'head', 'mri')[0]
            fro_dst_t = combine_transforms(head_mri_t, fro_dst_t, 'head',
                                           dest_frame)
# Fill in the volumes
img = np.zeros(shape3d)
for ii, vs in enumerate(src_types['volume']):
# read the lookup table value for segmented volume
if 'seg_name' not in vs:
raise ValueError('Volume sources should be segments, '
'not the entire volume.')
# find the color value for this volume
use_id = 1.
if mri_resolution is True or use_lut:
id_ = lut[vs['seg_name']]
if use_lut:
use_id = id_
if mri_resolution == 'sparse':
idx = apply_trans(fro_dst_t, vs['rr'][vs['vertno']])
idx = tuple(idx.round().astype(int).T)
elif mri_resolution is True: # fill the represented vol
# get the values for this volume
idx = (aseg_data == id_)
else:
assert mri_resolution is False
idx = vs['inuse'].reshape(shape3d, order='F').astype(bool)
img[idx] = use_id
# loop through the surface and discrete source spaces
        # get the surface names (assumes left/right order; may want
        # to add these names during source space generation)
for src in src_types['surface_discrete']:
val = 1
if src['type'] == 'surf':
if not include_surfaces:
continue
if use_lut:
surf_name = {
FIFF.FIFFV_MNE_SURF_LEFT_HEMI: 'Left',
FIFF.FIFFV_MNE_SURF_RIGHT_HEMI: 'Right',
}[src['id']] + '-Cerebral-Cortex'
val = lut[surf_name]
else:
assert src['type'] == 'discrete'
if not include_discrete:
continue
if use_lut:
logger.info('Discrete sources do not have values on '
'the lookup table. Defaulting to 1.')
# convert vertex positions from their native space
# (either HEAD or MRI) to MRI_VOXEL space
if mri_resolution is True:
use_rr = src['rr']
else:
assert mri_resolution is False or mri_resolution == 'sparse'
use_rr = src['rr'][src['vertno']]
srf_vox = apply_trans(fro_dst_t['trans'], use_rr)
# convert to numeric indices
ix_, iy_, iz_ = srf_vox.T.round().astype(int)
# clip indices outside of volume space
            ix = np.clip(ix_, 0, shape3d[0] - 1)
iy = np.clip(iy_, 0, shape3d[1] - 1)
iz = np.clip(iz_, 0, shape3d[2] - 1)
# compare original and clipped indices
n_diff = ((ix_ != ix) | (iy_ != iy) | (iz_ != iz)).sum()
# generate use warnings for clipping
if n_diff > 0:
warn(f'{n_diff} {src["type"]} vertices lay outside of volume '
f'space. Consider using a larger volume space.')
# get surface id or use default value
# update image to include surface voxels
img[ix, iy, iz] = val
if dest == 'mri':
# combine with MRI to RAS transform
transform = combine_transforms(
transform, vs['mri_ras_t'],
transform['from'], vs['mri_ras_t']['to'])
# now setup the affine for volume image
affine = transform['trans'].copy()
# make sure affine converts from m to mm
affine[:3] *= 1e3
# setup image for file
        if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
# setup the nifti header
hdr = nib.Nifti1Header()
hdr.set_xyzt_units('mm')
# save the nifti image
img = nib.Nifti1Image(img, affine, header=hdr)
elif fname.endswith('.mgz'): # save as mgh
# convert to float32 (float64 not currently supported)
img = img.astype('float32')
# save the mgh image
img = nib.freesurfer.mghformat.MGHImage(img, affine)
else:
            raise ValueError('Unrecognized file extension')
# write image to file
nib.save(img, fname)
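# An illustrative sketch (not part of the original module): the voxel-filling
# step of ``export_volume`` rounds source positions to voxel indices, clips
# them to the image bounds, and writes a label value. A minimal version of
# that pattern on a hypothetical 4x4x4 volume:
def _demo_fill_voxels():
    import numpy as np
    shape3d = (4, 4, 4)
    img = np.zeros(shape3d)
    # hypothetical voxel coordinates; the second point lies outside the grid
    srf_vox = np.array([[0.2, 1.1, 2.9], [5.0, 1.0, 1.0]])
    ix_, iy_, iz_ = srf_vox.T.round().astype(int)
    ix = np.clip(ix_, 0, shape3d[0] - 1)
    iy = np.clip(iy_, 0, shape3d[1] - 1)
    iz = np.clip(iz_, 0, shape3d[2] - 1)
    n_clipped = ((ix_ != ix) | (iy_ != iy) | (iz_ != iz)).sum()  # -> 1
    img[ix, iy, iz] = 1.  # assign the label value
    return img, n_clipped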
def _add_patch_info(s):
"""Patch information in a source space.
Generate the patch information from the 'nearest' vector in
a source space. For vertex in the source space it provides
the list of neighboring vertices in the high resolution
triangulation.
Parameters
----------
s : dict
The source space.
"""
nearest = s['nearest']
if nearest is None:
s['pinfo'] = None
s['patch_inds'] = None
return
logger.info(' Computing patch statistics...')
indn = np.argsort(nearest)
nearest_sorted = nearest[indn]
steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
starti = np.r_[[0], steps]
stopi = np.r_[steps, [len(nearest)]]
pinfo = list()
for start, stop in zip(starti, stopi):
pinfo.append(np.sort(indn[start:stop]))
s['pinfo'] = pinfo
# compute patch indices of the in-use source space vertices
patch_verts = nearest_sorted[steps - 1]
s['patch_inds'] = np.searchsorted(patch_verts, s['vertno'])
logger.info(' Patch information added...')
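# An illustrative sketch (not part of the original module): the grouping
# trick used above sorts the 'nearest' assignments and splits the sorted
# index array wherever the assigned value changes. A toy version with
# hypothetical assignments:
def _demo_patch_grouping():
    import numpy as np
    nearest = np.array([2, 0, 2, 1, 0, 2])
    indn = np.argsort(nearest)
    nearest_sorted = nearest[indn]
    steps = np.where(nearest_sorted[1:] != nearest_sorted[:-1])[0] + 1
    starti = np.r_[[0], steps]
    stopi = np.r_[steps, [len(nearest)]]
    # one sorted vertex list per patch: [[1, 4], [3], [0, 2, 5]]
    return [np.sort(indn[start:stop]) for start, stop in zip(starti, stopi)]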
@verbose
def _read_source_spaces_from_tree(fid, tree, patch_stats=False, verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
tree : dict
The FIF tree structure if source is a file id.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
%(verbose)s
Returns
-------
src : SourceSpaces
The source spaces.
"""
# Find all source spaces
spaces = dir_tree_find(tree, FIFF.FIFFB_MNE_SOURCE_SPACE)
if len(spaces) == 0:
raise ValueError('No source spaces found')
src = list()
for s in spaces:
logger.info(' Reading a source space...')
this = _read_one_source_space(fid, s)
logger.info(' [done]')
if patch_stats:
_complete_source_space_info(this)
src.append(this)
logger.info(' %d source spaces read' % len(spaces))
return SourceSpaces(src)
@verbose
def read_source_spaces(fname, patch_stats=False, verbose=None):
"""Read the source spaces from a FIF file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
%(verbose)s
Returns
-------
src : SourceSpaces
The source spaces.
See Also
--------
write_source_spaces, setup_source_space, setup_volume_source_space
"""
# be more permissive on read than write (fwd/inv can contain src)
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz',
'-fwd.fif', '-fwd.fif.gz',
'_fwd.fif', '_fwd.fif.gz',
'-inv.fif', '-inv.fif.gz',
'_inv.fif', '_inv.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
src = _read_source_spaces_from_tree(fid, tree, patch_stats=patch_stats,
verbose=verbose)
src.info['fname'] = fname
node = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)
if node:
node = node[0]
for p in range(node['nent']):
kind = node['directory'][p].kind
pos = node['directory'][p].pos
tag = read_tag(fid, pos)
if kind == FIFF.FIFF_MNE_ENV_WORKING_DIR:
src.info['working_dir'] = tag.data
elif kind == FIFF.FIFF_MNE_ENV_COMMAND_LINE:
src.info['command_line'] = tag.data
return src
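# An illustrative sketch (not part of the original module): a typical read /
# write round trip, using a hypothetical file name:
def _demo_src_round_trip(fname='sample-src.fif'):
    src = read_source_spaces(fname)
    write_source_spaces('copy-' + fname, src, overwrite=True)
    return src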
def _read_one_source_space(fid, this):
"""Read one source space."""
res = dict()
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_ID)
if tag is None:
res['id'] = int(FIFF.FIFFV_MNE_SURF_UNKNOWN)
else:
res['id'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE)
if tag is None:
raise ValueError('Unknown source space type')
else:
src_type = int(tag.data)
if src_type == FIFF.FIFFV_MNE_SPACE_SURFACE:
res['type'] = 'surf'
elif src_type == FIFF.FIFFV_MNE_SPACE_VOLUME:
res['type'] = 'vol'
elif src_type == FIFF.FIFFV_MNE_SPACE_DISCRETE:
res['type'] = 'discrete'
else:
raise ValueError('Unknown source space type (%d)' % src_type)
if res['type'] == 'vol':
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS)
if tag is not None:
res['shape'] = tuple(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_COORD_TRANS)
if tag is not None:
res['src_mri_t'] = tag.data
parent_mri = dir_tree_find(this, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
if len(parent_mri) == 0:
# MNE 2.7.3 (and earlier) didn't store necessary information
# about volume coordinate translations. Although there is a
            # FIFF_COORD_TRANS in the higher level of the FIFF file, this
# doesn't contain all the info we need. Safer to return an
# error unless a user really wants us to add backward compat.
raise ValueError('Can not find parent MRI location. The volume '
'source space may have been made with an MNE '
'version that is too old (<= 2.7.3). Consider '
'updating and regenerating the inverse.')
mri = parent_mri[0]
for d in mri['directory']:
if d.kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, d.pos)
trans = tag.data
if trans['from'] == FIFF.FIFFV_MNE_COORD_MRI_VOXEL:
res['vox_mri_t'] = tag.data
if trans['to'] == FIFF.FIFFV_MNE_COORD_RAS:
res['mri_ras_t'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR)
if tag is not None:
res['interpolator'] = tag.data
if tag.data.data.size == 0:
del res['interpolator']
else:
logger.info("Interpolation matrix for MRI not found.")
tag = find_tag(fid, mri, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE)
if tag is not None:
res['mri_file'] = tag.data
tag = find_tag(fid, mri, FIFF.FIFF_MRI_WIDTH)
if tag is not None:
res['mri_width'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_HEIGHT)
if tag is not None:
res['mri_height'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MRI_DEPTH)
if tag is not None:
res['mri_depth'] = int(tag.data)
tag = find_tag(fid, mri, FIFF.FIFF_MNE_FILE_NAME)
if tag is not None:
res['mri_volume_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS)
if tag is not None:
nneighbors = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS)
offset = 0
neighbors = []
for n in nneighbors:
neighbors.append(tag.data[offset:offset + n])
offset += n
res['neighbor_vert'] = neighbors
tag = find_tag(fid, this, FIFF.FIFF_COMMENT)
if tag is not None:
res['seg_name'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)
if tag is None:
raise ValueError('Number of vertices not found')
res['np'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI)
if tag is None:
res['ntri'] = 0
else:
res['ntri'] = int(tag.data)
else:
res['ntri'] = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
raise ValueError('Coordinate frame information not found')
res['coord_frame'] = tag.data[0]
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS)
if tag is None:
raise ValueError('Vertex data not found')
res['rr'] = tag.data.astype(np.float64) # double precision for mayavi
if res['rr'].shape[0] != res['np']:
raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
raise ValueError('Vertex normals not found')
res['nn'] = tag.data.copy()
if res['nn'].shape[0] != res['np']:
raise ValueError('Vertex normal information is incorrect')
if res['ntri'] > 0:
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES)
if tag is None:
raise ValueError('Triangulation not found')
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
else:
res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
raise ValueError('Triangulation information is incorrect')
else:
res['tris'] = None
# Which vertices are active
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
if tag is None:
res['nuse'] = 0
res['inuse'] = np.zeros(res['nuse'], dtype=np.int64)
res['vertno'] = None
else:
res['nuse'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION)
if tag is None:
raise ValueError('Source selection information missing')
res['inuse'] = tag.data.astype(np.int64).T
if len(res['inuse']) != res['np']:
raise ValueError('Incorrect number of entries in source space '
'selection')
res['vertno'] = np.where(res['inuse'])[0]
# Use triangulation
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES)
if tag1 is None or tag2 is None:
res['nuse_tri'] = 0
res['use_tris'] = None
else:
res['nuse_tri'] = tag1.data
res['use_tris'] = tag2.data - 1 # index start at 0 in Python
# Patch-related information
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST)
if tag1 is None or tag2 is None:
res['nearest'] = None
res['nearest_dist'] = None
else:
res['nearest'] = tag1.data
res['nearest_dist'] = tag2.data.T
_add_patch_info(res)
# Distances
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT)
if tag1 is None or tag2 is None:
res['dist'] = None
res['dist_limit'] = None
else:
res['dist'] = tag1.data
res['dist_limit'] = tag2.data
# Add the upper triangle
res['dist'] = res['dist'] + res['dist'].T
    if res['dist'] is not None:
logger.info(' Distance information added...')
tag = find_tag(fid, this, FIFF.FIFF_SUBJ_HIS_ID)
if tag is None:
res['subject_his_id'] = None
else:
res['subject_his_id'] = tag.data
return res
@verbose
def _complete_source_space_info(this, verbose=None):
"""Add more info on surface."""
# Main triangulation
logger.info(' Completing triangulation info...')
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['tri_area'] = _normalize_vectors(this['tri_nn']) / 2.0
logger.info('[done]')
# Selected triangles
logger.info(' Completing selection triangulation info...')
if this['nuse_tri'] > 0:
r1 = this['rr'][this['use_tris'][:, 0], :]
r2 = this['rr'][this['use_tris'][:, 1], :]
r3 = this['rr'][this['use_tris'][:, 2], :]
this['use_tri_cent'] = (r1 + r2 + r3) / 3.0
this['use_tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
this['use_tri_area'] = np.linalg.norm(this['use_tri_nn'], axis=1) / 2.
logger.info('[done]')
def find_source_space_hemi(src):
"""Return the hemisphere id for a source space.
Parameters
----------
src : dict
The source space to investigate.
Returns
-------
hemi : int
Deduced hemisphere id.
"""
xave = src['rr'][:, 0].sum()
if xave < 0:
hemi = int(FIFF.FIFFV_MNE_SURF_LEFT_HEMI)
else:
hemi = int(FIFF.FIFFV_MNE_SURF_RIGHT_HEMI)
return hemi
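# An illustrative sketch (not part of the original module): the hemisphere
# is deduced purely from the sign of the summed x coordinate, since
# left-hemisphere vertices have negative x in FreeSurfer RAS. A toy check
# with hypothetical left-hemisphere positions:
def _demo_hemi_sign():
    import numpy as np
    rr_lh = np.array([[-0.04, 0.00, 0.05], [-0.02, 0.01, 0.06]])
    return 'lh' if rr_lh[:, 0].sum() < 0 else 'rh'  # -> 'lh'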
def label_src_vertno_sel(label, src):
"""Find vertex numbers and indices from label.
Parameters
----------
label : Label
Source space label.
src : dict
Source space.
Returns
-------
vertices : list of length 2
Vertex numbers for lh and rh.
src_sel : array of int (len(idx) = len(vertices[0]) + len(vertices[1]))
        Indices of the selected vertices in source space.
"""
if src[0]['type'] != 'surf':
        raise Exception('Labels are only supported with surface source '
                        'spaces')
vertno = [src[0]['vertno'], src[1]['vertno']]
if label.hemi == 'lh':
vertno_sel = np.intersect1d(vertno[0], label.vertices)
src_sel = np.searchsorted(vertno[0], vertno_sel)
vertno[0] = vertno_sel
vertno[1] = np.array([], int)
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(vertno[1], label.vertices)
src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
vertno[0] = np.array([], int)
vertno[1] = vertno_sel
elif label.hemi == 'both':
vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
src_sel = np.hstack((src_sel_lh, src_sel_rh))
vertno = [vertno_sel_lh, vertno_sel_rh]
else:
raise Exception("Unknown hemisphere type")
return vertno, src_sel
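# An illustrative sketch (not part of the original module): restricting a
# source space to a label reduces to intersecting vertex numbers and then
# looking up their positions, which works because 'vertno' is sorted. Toy
# version with hypothetical vertex numbers:
def _demo_vertno_selection():
    import numpy as np
    vertno = np.array([3, 7, 12, 20])      # in-use source space vertices
    label_vertices = np.array([7, 8, 20])  # vertices of the label
    vertno_sel = np.intersect1d(vertno, label_vertices)  # -> [7, 20]
    src_sel = np.searchsorted(vertno, vertno_sel)        # -> [1, 3]
    return vertno_sel, src_sel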
def _get_vertno(src):
return [s['vertno'] for s in src]
###############################################################################
# Write routines
@verbose
def _write_source_spaces_to_fid(fid, src, verbose=None):
"""Write the source spaces to a FIF file.
Parameters
----------
fid : file descriptor
An open file descriptor.
src : list
The list of source spaces.
%(verbose)s
"""
for s in src:
logger.info(' Write a source space...')
start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
_write_one_source_space(fid, s, verbose)
end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
logger.info(' [done]')
logger.info(' %d source spaces written' % len(src))
@verbose
def write_source_spaces(fname, src, overwrite=False, verbose=None):
"""Write source spaces to a file.
Parameters
----------
fname : str
The name of the file, which should end with -src.fif or
-src.fif.gz.
src : SourceSpaces
The source spaces (as returned by read_source_spaces).
%(overwrite)s
%(verbose)s
See Also
--------
read_source_spaces
"""
check_fname(fname, 'source space', ('-src.fif', '-src.fif.gz',
'_src.fif', '_src.fif.gz'))
_check_fname(fname, overwrite=overwrite)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MNE)
if src.info:
start_block(fid, FIFF.FIFFB_MNE_ENV)
write_id(fid, FIFF.FIFF_BLOCK_ID)
data = src.info.get('working_dir', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)
data = src.info.get('command_line', None)
if data:
write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)
end_block(fid, FIFF.FIFFB_MNE_ENV)
_write_source_spaces_to_fid(fid, src, verbose)
end_block(fid, FIFF.FIFFB_MNE)
end_file(fid)
def _write_one_source_space(fid, this, verbose=None):
"""Write one source space."""
from scipy import sparse
if this['type'] == 'surf':
src_type = FIFF.FIFFV_MNE_SPACE_SURFACE
elif this['type'] == 'vol':
src_type = FIFF.FIFFV_MNE_SPACE_VOLUME
elif this['type'] == 'discrete':
src_type = FIFF.FIFFV_MNE_SPACE_DISCRETE
else:
raise ValueError('Unknown source space type (%s)' % this['type'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TYPE, src_type)
if this['id'] >= 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_ID, this['id'])
data = this.get('subject_his_id', None)
if data:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, data)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, this['coord_frame'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, this['np'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_POINTS, this['rr'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, this['nn'])
# Which vertices are active
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION, this['inuse'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE, this['nuse'])
if this['ntri'] > 0:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NTRI, this['ntri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES,
this['tris'] + 1)
if this['type'] != 'vol' and this['use_tris'] is not None:
# Use triangulation
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
this['use_tris'] + 1)
if this['type'] == 'vol':
neighbor_vert = this.get('neighbor_vert', None)
if neighbor_vert is not None:
nneighbors = np.array([len(n) for n in neighbor_vert])
neighbors = np.concatenate(neighbor_vert)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS, nneighbors)
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS, neighbors)
write_coord_trans(fid, this['src_mri_t'])
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS, this['shape'])
start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
write_coord_trans(fid, this['mri_ras_t'])
write_coord_trans(fid, this['vox_mri_t'])
mri_volume_name = this.get('mri_volume_name', None)
if mri_volume_name is not None:
write_string(fid, FIFF.FIFF_MNE_FILE_NAME, mri_volume_name)
mri_width, mri_height, mri_depth, nvox = _src_vol_dims(this)
interpolator = this.get('interpolator')
if interpolator is None:
interpolator = sparse.csr_matrix((nvox, this['np']))
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR,
interpolator)
if 'mri_file' in this and this['mri_file'] is not None:
write_string(fid, FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE,
this['mri_file'])
write_int(fid, FIFF.FIFF_MRI_WIDTH, mri_width)
write_int(fid, FIFF.FIFF_MRI_HEIGHT, mri_height)
write_int(fid, FIFF.FIFF_MRI_DEPTH, mri_depth)
end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
# Patch-related information
if this['nearest'] is not None:
write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST, this['nearest'])
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST,
this['nearest_dist'])
# Distances
if this['dist'] is not None:
# Save only upper triangular portion of the matrix
dists = this['dist'].copy()
dists = sparse.triu(dists, format=dists.format)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST, dists)
write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT,
this['dist_limit'])
# Segmentation data
if this['type'] == 'vol' and ('seg_name' in this):
# Save the name of the segment
write_string(fid, FIFF.FIFF_COMMENT, this['seg_name'])
###############################################################################
# Creation and decimation
@verbose
def _check_spacing(spacing, verbose=None):
"""Check spacing parameter."""
# check to make sure our parameters are good, parse 'spacing'
types = ('a string with values "ico#", "oct#", "all", or an int >= 2')
space_err = ('"spacing" must be %s, got type %s (%r)'
% (types, type(spacing), spacing))
if isinstance(spacing, str):
if spacing == 'all':
stype = 'all'
sval = ''
        elif spacing[:3] in ('ico', 'oct'):
stype = spacing[:3]
sval = spacing[3:]
try:
sval = int(sval)
except Exception:
raise ValueError('%s subdivision must be an integer, got %r'
% (stype, sval))
lim = 0 if stype == 'ico' else 1
if sval < lim:
raise ValueError('%s subdivision must be >= %s, got %s'
% (stype, lim, sval))
else:
raise ValueError(space_err)
else:
stype = 'spacing'
sval = _ensure_int(spacing, 'spacing', types)
if sval < 2:
raise ValueError('spacing must be >= 2, got %d' % (sval,))
if stype == 'all':
logger.info('Include all vertices')
ico_surf = None
src_type_str = 'all'
else:
src_type_str = '%s = %s' % (stype, sval)
if stype == 'ico':
logger.info('Icosahedron subdivision grade %s' % sval)
ico_surf = _get_ico_surface(sval)
elif stype == 'oct':
logger.info('Octahedron subdivision grade %s' % sval)
ico_surf = _tessellate_sphere_surf(sval)
else:
assert stype == 'spacing'
logger.info('Approximate spacing %s mm' % sval)
ico_surf = sval
return stype, sval, ico_surf, src_type_str
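# An illustrative sketch (not part of the original module): the string form
# of 'spacing' is parsed by splitting the three-letter prefix from the
# numeric suffix, mirroring the logic in ``_check_spacing``:
def _demo_parse_spacing(spacing='oct6'):
    stype, sval = spacing[:3], int(spacing[3:])  # -> ('oct', 6)
    return stype, sval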
@verbose
def setup_source_space(subject, spacing='oct6', surface='white',
subjects_dir=None, add_dist=True, n_jobs=1,
verbose=None):
"""Set up bilateral hemisphere surface-based source space with subsampling.
Parameters
----------
%(subject)s
spacing : str
The spacing to use. Can be ``'ico#'`` for a recursively subdivided
icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
``'all'`` for all points, or an integer to use approximate
distance-based spacing (in mm).
.. versionchanged:: 0.18
Support for integers for distance-based spacing.
surface : str
The surface to use.
%(subjects_dir)s
add_dist : bool | str
Add distance and patch information to the source space. This takes some
        time, so precomputing it is recommended. Can also be 'patch' to only
compute patch information (requires SciPy 1.3+).
.. versionchanged:: 0.20
Support for add_dist='patch'.
%(n_jobs)s
Ignored if ``add_dist=='patch'``.
%(verbose)s
Returns
-------
src : SourceSpaces
The source space for each hemisphere.
See Also
--------
setup_volume_source_space
"""
cmd = ('setup_source_space(%s, spacing=%s, surface=%s, '
'subjects_dir=%s, add_dist=%s, verbose=%s)'
% (subject, spacing, surface, subjects_dir, add_dist, verbose))
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
for hemi in ['lh.', 'rh.']]
for surf, hemi in zip(surfs, ['LH', 'RH']):
if surf is not None and not op.isfile(surf):
raise IOError('Could not find the %s surface %s'
% (hemi, surf))
logger.info('Setting up the source space with the following parameters:\n')
logger.info('SUBJECTS_DIR = %s' % subjects_dir)
logger.info('Subject = %s' % subject)
logger.info('Surface = %s' % surface)
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
logger.info('')
del spacing
logger.info('>>> 1. Creating the source space...\n')
# mne_make_source_space ... actually make the source spaces
src = []
# pre-load ico/oct surf (once) for speed, if necessary
if stype not in ('spacing', 'all'):
logger.info('Doing the %shedral vertex picking...'
% (dict(ico='icosa', oct='octa')[stype],))
for hemi, surf in zip(['lh', 'rh'], surfs):
logger.info('Loading %s...' % surf)
# Setup the surface spacing in the MRI coord frame
if stype != 'all':
logger.info('Mapping %s %s -> %s (%d) ...'
% (hemi, subject, stype, sval))
s = _create_surf_spacing(surf, hemi, subject, stype, ico_surf,
subjects_dir)
logger.info('loaded %s %d/%d selected to source space (%s)'
% (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
src.append(s)
logger.info('') # newline after both subject types are run
# Fill in source space info
hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
for s, s_id in zip(src, hemi_ids):
# Add missing fields
s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
coord_frame=FIFF.FIFFV_COORD_MRI))
s['rr'] /= 1000.0
del s['tri_area']
del s['tri_cent']
del s['tri_nn']
del s['neighbor_tri']
# upconvert to object format from lists
src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
if add_dist:
dist_limit = 0. if add_dist == 'patch' else np.inf
add_source_space_distances(src, dist_limit=dist_limit,
n_jobs=n_jobs, verbose=verbose)
# write out if requested, then return the data
logger.info('You are now one step closer to computing the gain matrix')
return src
def _check_volume_labels(volume_label, mri, name='volume_label'):
_validate_type(mri, 'path-like', 'mri when %s is not None' % (name,))
mri = _check_fname(mri, overwrite='read', must_exist=True)
if isinstance(volume_label, str):
volume_label = [volume_label]
    _validate_type(volume_label, (list, tuple, dict), name)
if not isinstance(volume_label, dict):
# Turn it into a dict
if not mri.endswith('aseg.mgz'):
raise RuntimeError(
'Must use a *aseg.mgz file unless %s is a dict, got %s'
% (name, op.basename(mri)))
lut, _ = read_freesurfer_lut()
use_volume_label = dict()
for label in volume_label:
if label not in lut:
raise ValueError(
'Volume %r not found in file %s. Double check '
'FreeSurfer lookup table.%s'
% (label, mri, _suggest(label, lut)))
use_volume_label[label] = lut[label]
volume_label = use_volume_label
for label, id_ in volume_label.items():
_validate_type(label, str, 'volume_label keys')
_validate_type(id_, 'int-like', 'volume_labels[%r]' % (label,))
volume_label = {k: _ensure_int(v) for k, v in volume_label.items()}
return volume_label
@verbose
def setup_volume_source_space(subject=None, pos=5.0, mri=None,
sphere=None, bem=None,
surface=None, mindist=5.0, exclude=0.0,
subjects_dir=None, volume_label=None,
add_interpolator=True, sphere_units='m',
single_volume=False, verbose=None):
"""Set up a volume source space with grid spacing or discrete source space.
Parameters
----------
subject : str | None
Subject to process. If None, the path to the MRI volume must be
absolute to get a volume source space. If a subject name
is provided the T1.mgz file will be found automatically.
Defaults to None.
pos : float | dict
Positions to use for sources. If float, a grid will be constructed
with the spacing given by ``pos`` in mm, generating a volume source
space. If dict, pos['rr'] and pos['nn'] will be used as the source
space locations (in meters) and normals, respectively, creating a
discrete source space.
.. note:: For a discrete source space (``pos`` is a dict),
``mri`` must be None.
mri : str | None
The filename of an MRI volume (mgh or mgz) to create the
interpolation matrix over. Source estimates obtained in the
volume source space can then be morphed onto the MRI volume
using this interpolator. If pos is a dict, this cannot be None.
        If a subject name is provided, ``mri`` defaults to ``aseg.mgz`` when
        ``volume_label`` is given, or to 'T1.mgz' when ``pos`` is a float;
        otherwise it stays None.
sphere : ndarray, shape (4,) | ConductorModel | None
Define spherical source space bounds using origin and radius given
by (ox, oy, oz, rad) in ``sphere_units``.
Only used if ``bem`` and ``surface`` are both None. Can also be a
spherical ConductorModel, which will use the origin and radius.
None (the default) uses a head-digitization fit.
bem : str | None | ConductorModel
Define source space bounds using a BEM file (specifically the inner
        skull surface) or a ConductorModel for a 1-layer or 3-layer BEM.
surface : str | dict | None
Define source space bounds using a FreeSurfer surface file. Can
also be a dictionary with entries ``'rr'`` and ``'tris'``, such as
those returned by :func:`mne.read_surface`.
mindist : float
Exclude points closer than this distance (mm) to the bounding surface.
exclude : float
Exclude points closer than this distance (mm) from the center of mass
of the bounding surface.
%(subjects_dir)s
volume_label : str | dict | list | None
Region(s) of interest to use. None (default) will create a single
whole-brain source space. Otherwise, a separate source space will be
created for each entry in the list or dict (str will be turned into
a single-element list). If list of str, standard Freesurfer labels
are assumed. If dict, should be a mapping of region names to atlas
id numbers, allowing the use of other atlases.
.. versionchanged:: 0.21.0
Support for dict added.
add_interpolator : bool
If True and ``mri`` is not None, then an interpolation matrix
will be produced.
sphere_units : str
Defaults to ``"m"``.
.. versionadded:: 0.20
single_volume : bool
        If True, multiple values of ``volume_label`` will be merged into
a single source space instead of occupying multiple source spaces
(one for each sub-volume), i.e., ``len(src)`` will be ``1`` instead of
``len(volume_label)``. This can help conserve memory and disk space
when many labels are used.
.. versionadded:: 0.21
%(verbose)s
Returns
-------
src : SourceSpaces
A :class:`SourceSpaces` object containing one source space for each
        entry of ``volume_label``, or a single source space if
        ``volume_label`` was not specified.
See Also
--------
setup_source_space
Notes
-----
    Volume source spaces are related to an MRI image such as T1 and allow
    one to visualize source estimates overlaid on MRIs and to morph
    estimates to a template brain for group analysis. Discrete source
    spaces don't allow this. If you provide a subject name, the T1 MRI
    will be used by default.
    When you work with a source space formed from a grid, you need to
    specify the domain in which the grid will be defined. There are three
    ways of specifying this: (i) sphere, (ii) bem model, and (iii) surface.
    The default behavior is to use a sphere model
    (``sphere=(0.0, 0.0, 0.0, 90.0)``); if ``bem`` or ``surface`` is not
    ``None``, then ``sphere`` is ignored.
    If you're going to use a BEM conductor model for the forward model,
    it is recommended to pass it here.
    To create a discrete source space, ``pos`` must be a dict, ``mri`` must
    be None, and ``volume_label`` must be None. To create a whole-brain
    volume source space, ``pos`` must be a float and ``mri`` must be
    provided. To create a volume source space from a label, ``pos`` must be
    a float, ``volume_label`` must be provided, and ``mri`` must refer to a
    .mgh or .mgz file with values corresponding to the FreeSurfer lookup
    table (typically ``aseg.mgz``).
"""
subjects_dir = get_subjects_dir(subjects_dir)
_validate_type(
volume_label, (str, list, tuple, dict, None), 'volume_label')
if bem is not None and surface is not None:
raise ValueError('Only one of "bem" and "surface" should be '
'specified')
if mri is None and subject is not None:
if volume_label is not None:
mri = 'aseg.mgz'
elif _is_numeric(pos):
mri = 'T1.mgz'
if mri is not None:
mri = _check_mri(mri, subject, subjects_dir)
if isinstance(pos, dict):
raise ValueError('Cannot create interpolation matrix for '
'discrete source space, mri must be None if '
'pos is a dict')
if volume_label is not None:
volume_label = _check_volume_labels(volume_label, mri)
assert volume_label is None or isinstance(volume_label, dict)
sphere = _check_sphere(sphere, sphere_units=sphere_units)
# triage bounding argument
if bem is not None:
logger.info('BEM : %s', bem)
elif surface is not None:
if isinstance(surface, dict):
if not all(key in surface for key in ['rr', 'tris']):
raise KeyError('surface, if dict, must have entries "rr" '
'and "tris"')
# let's make sure we have geom info
complete_surface_info(surface, copy=False, verbose=False)
surf_extra = 'dict()'
elif isinstance(surface, str):
if not op.isfile(surface):
raise IOError('surface file "%s" not found' % surface)
surf_extra = surface
logger.info('Boundary surface file : %s', surf_extra)
else:
logger.info('Sphere : origin at (%.1f %.1f %.1f) mm'
% (1000 * sphere[0], 1000 * sphere[1], 1000 * sphere[2]))
logger.info(' radius : %.1f mm' % (1000 * sphere[3],))
# triage pos argument
if isinstance(pos, dict):
if not all(key in pos for key in ['rr', 'nn']):
raise KeyError('pos, if dict, must contain "rr" and "nn"')
pos_extra = 'dict()'
else: # pos should be float-like
try:
pos = float(pos)
except (TypeError, ValueError):
raise ValueError('pos must be a dict, or something that can be '
'cast to float()')
if not isinstance(pos, float):
logger.info('Source location file : %s', pos_extra)
logger.info('Assuming input in millimeters')
logger.info('Assuming input in MRI coordinates')
if isinstance(pos, float):
logger.info('grid : %.1f mm' % pos)
logger.info('mindist : %.1f mm' % mindist)
        pos /= 1000.0  # convert pos from mm to m
if exclude > 0.0:
logger.info('Exclude : %.1f mm' % exclude)
vol_info = dict()
if mri is not None:
logger.info('MRI volume : %s' % mri)
logger.info('')
logger.info('Reading %s...' % mri)
vol_info = _get_mri_info_data(mri, data=volume_label is not None)
    exclude /= 1000.0  # convert exclude from mm to m
logger.info('')
# Explicit list of points
if not isinstance(pos, float):
# Make the grid of sources
sp = [_make_discrete_source_space(pos)]
else:
# Load the brain surface as a template
if isinstance(bem, str):
# read bem surface in the MRI coordinate frame
surf = read_bem_surfaces(bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN,
verbose=False)
logger.info('Loaded inner skull from %s (%d nodes)'
% (bem, surf['np']))
elif bem is not None and bem.get('is_sphere') is False:
# read bem surface in the MRI coordinate frame
which = np.where([surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
for surf in bem['surfs']])[0]
if len(which) != 1:
raise ValueError('Could not get inner skull surface from BEM')
surf = bem['surfs'][which[0]]
assert surf['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
if surf['coord_frame'] != FIFF.FIFFV_COORD_MRI:
raise ValueError('BEM is not in MRI coordinates, got %s'
% (_coord_frame_name(surf['coord_frame']),))
logger.info('Taking inner skull from %s' % bem)
elif surface is not None:
if isinstance(surface, str):
# read the surface in the MRI coordinate frame
surf = read_surface(surface, return_dict=True)[-1]
else:
surf = surface
logger.info('Loaded bounding surface from %s (%d nodes)'
% (surface, surf['np']))
surf = deepcopy(surf)
surf['rr'] *= 1e-3 # must be converted to meters
else: # Load an icosahedron and use that as the surface
logger.info('Setting up the sphere...')
surf = dict(R=sphere[3], r0=sphere[:3])
# Make the grid of sources in MRI space
sp = _make_volume_source_space(
surf, pos, exclude, mindist, mri, volume_label,
vol_info=vol_info, single_volume=single_volume)
del sphere
assert isinstance(sp, list)
    assert len(sp) == (1 if (volume_label is None or single_volume)
                       else len(volume_label))
# Compute an interpolation matrix to show data in MRI_VOXEL coord frame
if mri is not None:
if add_interpolator:
_add_interpolator(sp)
elif sp[0]['type'] == 'vol':
# If there is no interpolator, it's actually a discrete source space
sp[0]['type'] = 'discrete'
# do some cleaning
if volume_label is None and 'seg_name' in sp[0]:
del sp[0]['seg_name']
for s in sp:
if 'vol_dims' in s:
del s['vol_dims']
# Save it
sp = _complete_vol_src(sp, subject)
return sp
def _complete_vol_src(sp, subject=None):
for s in sp:
s.update(dict(nearest=None, dist=None, use_tris=None, patch_inds=None,
dist_limit=None, pinfo=None, ntri=0, nearest_dist=None,
nuse_tri=0, tris=None, subject_his_id=subject))
sp = SourceSpaces(sp, dict(working_dir=os.getcwd(), command_line='None'))
return sp
def _make_voxel_ras_trans(move, ras, voxel_size):
"""Make a transformation from MRI_VOXEL to MRI surface RAS (i.e. MRI)."""
assert voxel_size.ndim == 1
assert voxel_size.size == 3
rot = ras.T * voxel_size[np.newaxis, :]
assert rot.ndim == 2
assert rot.shape[0] == 3
assert rot.shape[1] == 3
trans = np.c_[np.r_[rot, np.zeros((1, 3))], np.r_[move, 1.0]]
t = Transform('mri_voxel', 'mri', trans)
return t
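# An illustrative sketch (not part of the original module): for an
# axis-aligned grid, the transform built above just scales voxel indices by
# the grid spacing and shifts by the grid origin. With a hypothetical 5 mm
# grid whose origin sits at (-20, -20, -20) mm:
def _demo_voxel_ras_trans():
    import numpy as np
    move = np.array([-0.02, -0.02, -0.02])  # grid origin in meters
    t = _make_voxel_ras_trans(move, np.eye(3), 0.005 * np.ones(3))
    # voxel (1, 0, 0) maps to (-0.015, -0.02, -0.02) m
    return apply_trans(t, np.array([[1., 0., 0.]]))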
def _make_discrete_source_space(pos, coord_frame='mri'):
"""Use a discrete set of source locs/oris to make src space.
Parameters
----------
pos : dict
Must have entries "rr" and "nn". Data should be in meters.
coord_frame : str
The coordinate frame in which the positions are given; default: 'mri'.
The frame must be one defined in transforms.py:_str_to_frame
Returns
-------
src : dict
The source space.
"""
# Check that coordinate frame is valid
if coord_frame not in _str_to_frame: # will fail if coord_frame not string
raise KeyError('coord_frame must be one of %s, not "%s"'
% (list(_str_to_frame.keys()), coord_frame))
coord_frame = _str_to_frame[coord_frame] # now an int
# process points (copy and cast)
rr = np.array(pos['rr'], float)
nn = np.array(pos['nn'], float)
    if not (rr.ndim == nn.ndim == 2 and rr.shape[0] == nn.shape[0] and
rr.shape[1] == nn.shape[1] and np.isfinite(rr).all() and
np.isfinite(nn).all()):
raise RuntimeError('"rr" and "nn" must both be finite 2D arrays with '
'the same number of rows and 3 columns')
npts = rr.shape[0]
_normalize_vectors(nn)
nz = np.sum(np.sum(nn * nn, axis=1) == 0)
if nz != 0:
raise RuntimeError('%d sources have zero length normal' % nz)
logger.info('Positions (in meters) and orientations')
logger.info('%d sources' % npts)
# Ready to make the source space
sp = dict(coord_frame=coord_frame, type='discrete', nuse=npts, np=npts,
inuse=np.ones(npts, int), vertno=np.arange(npts), rr=rr, nn=nn,
id=-1)
return sp
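# An illustrative sketch (not part of the original module): a minimal 'pos'
# dict accepted by ``_make_discrete_source_space`` (positions in meters and
# unit normals), using two hypothetical source locations:
def _demo_discrete_pos():
    import numpy as np
    pos = dict(rr=np.array([[0.00, 0., 0.07], [0.01, 0., 0.07]]),
               nn=np.array([[0., 0., 1.], [0., 0., 1.]]))
    return _make_discrete_source_space(pos)  # a 2-source discrete space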
def _make_volume_source_space(surf, grid, exclude, mindist, mri=None,
volume_labels=None, do_neighbors=True, n_jobs=1,
vol_info={}, single_volume=False):
"""Make a source space which covers the volume bounded by surf."""
# Figure out the grid size in the MRI coordinate frame
if 'rr' in surf:
mins = np.min(surf['rr'], axis=0)
maxs = np.max(surf['rr'], axis=0)
cm = np.mean(surf['rr'], axis=0) # center of mass
maxdist = np.linalg.norm(surf['rr'] - cm, axis=1).max()
else:
mins = surf['r0'] - surf['R']
maxs = surf['r0'] + surf['R']
cm = surf['r0'].copy()
maxdist = surf['R']
# Define the sphere which fits the surface
logger.info('Surface CM = (%6.1f %6.1f %6.1f) mm'
% (1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
logger.info('Surface fits inside a sphere with radius %6.1f mm'
% (1000 * maxdist))
logger.info('Surface extent:')
for c, mi, ma in zip('xyz', mins, maxs):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi, 1000 * ma))
maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else -
np.floor(np.abs(m) / grid) - 1 for m in mins], int)
logger.info('Grid extent:')
for c, mi, ma in zip('xyz', minn, maxn):
logger.info(' %s = %6.1f ... %6.1f mm'
% (c, 1000 * mi * grid, 1000 * ma * grid))
# Now make the initial grid
ns = tuple(maxn - minn + 1)
npts = np.prod(ns)
nrow = ns[0]
ncol = ns[1]
nplane = nrow * ncol
# x varies fastest, then y, then z (can use unravel to do this)
rr = np.meshgrid(np.arange(minn[2], maxn[2] + 1),
np.arange(minn[1], maxn[1] + 1),
np.arange(minn[0], maxn[0] + 1), indexing='ij')
x, y, z = rr[2].ravel(), rr[1].ravel(), rr[0].ravel()
rr = np.array([x * grid, y * grid, z * grid]).T
sp = dict(np=npts, nn=np.zeros((npts, 3)), rr=rr,
inuse=np.ones(npts, bool), type='vol', nuse=npts,
coord_frame=FIFF.FIFFV_COORD_MRI, id=-1, shape=ns)
sp['nn'][:, 2] = 1.0
assert sp['rr'].shape[0] == npts
logger.info('%d sources before omitting any.', sp['nuse'])
# Exclude infeasible points
dists = np.linalg.norm(sp['rr'] - cm, axis=1)
bads = np.where(np.logical_or(dists < exclude, dists > maxdist))[0]
sp['inuse'][bads] = False
sp['nuse'] -= len(bads)
logger.info('%d sources after omitting infeasible sources not within '
'%0.1f - %0.1f mm.',
sp['nuse'], 1000 * exclude, 1000 * maxdist)
if 'rr' in surf:
_filter_source_spaces(surf, mindist, None, [sp], n_jobs)
else: # sphere
vertno = np.where(sp['inuse'])[0]
bads = (np.linalg.norm(sp['rr'][vertno] - surf['r0'], axis=-1) >=
surf['R'] - mindist / 1000.)
sp['nuse'] -= bads.sum()
sp['inuse'][vertno[bads]] = False
sp['vertno'] = np.where(sp['inuse'])[0]
del vertno
del surf
logger.info('%d sources remaining after excluding the sources outside '
'the surface and less than %6.1f mm inside.'
% (sp['nuse'], mindist))
# Restrict sources to volume of interest
if volume_labels is None:
sp['seg_name'] = 'the whole brain'
sps = [sp]
else:
if not do_neighbors:
            raise RuntimeError('volume_label cannot be used unless '
                               'do_neighbors is True')
sps = list()
orig_sp = sp
# reduce the sizes when we deepcopy
for volume_label, id_ in volume_labels.items():
# this saves us some memory
memodict = dict()
for key in ('rr', 'nn'):
if key in orig_sp:
arr = orig_sp[key]
memodict[id(arr)] = arr
sp = deepcopy(orig_sp, memodict)
good = _get_atlas_values(vol_info, sp['rr'][sp['vertno']]) == id_
n_good = good.sum()
logger.info(' Selected %d voxel%s from %s'
% (n_good, _pl(n_good), volume_label))
if n_good == 0:
warn('Found no usable vertices in volume label '
f'{repr(volume_label)} (id={id_}) using a '
f'{grid * 1000:0.1f} mm grid')
# Update source info
sp['inuse'][sp['vertno'][~good]] = False
sp['vertno'] = sp['vertno'][good]
sp['nuse'] = sp['inuse'].sum()
sp['seg_name'] = volume_label
sp['mri_file'] = mri
sps.append(sp)
del orig_sp
assert len(sps) == len(volume_labels)
# This will undo some of the work above, but the calculations are
# pretty trivial so allow it
if single_volume:
for sp in sps[1:]:
sps[0]['inuse'][sp['vertno']] = True
sp = sps[0]
sp['seg_name'] = '+'.join(s['seg_name'] for s in sps)
sps = sps[:1]
sp['vertno'] = np.where(sp['inuse'])[0]
sp['nuse'] = len(sp['vertno'])
del sp, volume_labels
if not do_neighbors:
return sps
k = np.arange(npts)
neigh = np.empty((26, npts), int)
neigh.fill(-1)
# Figure out each neighborhood:
# 6-neighborhood first
idxs = [z > minn[2], x < maxn[0], y < maxn[1],
x > minn[0], y > minn[1], z < maxn[2]]
offsets = [-nplane, 1, nrow, -1, -nrow, nplane]
for n, idx, offset in zip(neigh[:6], idxs, offsets):
n[idx] = k[idx] + offset
# Then the rest to complete the 26-neighborhood
# First the plane below
idx1 = z > minn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[6, idx2] = k[idx2] + 1 - nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[7, idx3] = k[idx3] + 1 + nrow - nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[8, idx2] = k[idx2] + nrow - nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[9, idx3] = k[idx3] - 1 + nrow - nplane
neigh[10, idx2] = k[idx2] - 1 - nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[11, idx3] = k[idx3] - 1 - nrow - nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[12, idx2] = k[idx2] - nrow - nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[13, idx3] = k[idx3] + 1 - nrow - nplane
# Then the same plane
idx1 = np.logical_and(x < maxn[0], y < maxn[1])
neigh[14, idx1] = k[idx1] + 1 + nrow
idx1 = x > minn[0]
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[15, idx2] = k[idx2] - 1 + nrow
idx2 = np.logical_and(idx1, y > minn[1])
neigh[16, idx2] = k[idx2] - 1 - nrow
idx1 = np.logical_and(y > minn[1], x < maxn[0])
    neigh[17, idx1] = k[idx1] + 1 - nrow
# Finally one plane above
idx1 = z < maxn[2]
idx2 = np.logical_and(idx1, x < maxn[0])
neigh[18, idx2] = k[idx2] + 1 + nplane
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[19, idx3] = k[idx3] + 1 + nrow + nplane
idx2 = np.logical_and(idx1, y < maxn[1])
neigh[20, idx2] = k[idx2] + nrow + nplane
idx2 = np.logical_and(idx1, x > minn[0])
idx3 = np.logical_and(idx2, y < maxn[1])
neigh[21, idx3] = k[idx3] - 1 + nrow + nplane
neigh[22, idx2] = k[idx2] - 1 + nplane
idx3 = np.logical_and(idx2, y > minn[1])
neigh[23, idx3] = k[idx3] - 1 - nrow + nplane
idx2 = np.logical_and(idx1, y > minn[1])
neigh[24, idx2] = k[idx2] - nrow + nplane
idx3 = np.logical_and(idx2, x < maxn[0])
neigh[25, idx3] = k[idx3] + 1 - nrow + nplane
# Omit unused vertices from the neighborhoods
logger.info('Adjusting the neighborhood info.')
r0 = minn * grid
voxel_size = grid * np.ones(3)
ras = np.eye(3)
src_mri_t = _make_voxel_ras_trans(r0, ras, voxel_size)
neigh_orig = neigh
for sp in sps:
# remove non source-space points
neigh = neigh_orig.copy()
neigh[:, np.logical_not(sp['inuse'])] = -1
# remove these points from neigh
old_shape = neigh.shape
neigh = neigh.ravel()
checks = np.where(neigh >= 0)[0]
removes = np.logical_not(np.in1d(checks, sp['vertno']))
neigh[checks[removes]] = -1
neigh.shape = old_shape
neigh = neigh.T
# Thought we would need this, but C code keeps -1 vertices, so we will:
# neigh = [n[n >= 0] for n in enumerate(neigh[vertno])]
sp['neighbor_vert'] = neigh
# Set up the volume data (needed for creating the interpolation matrix)
sp['src_mri_t'] = src_mri_t
sp['vol_dims'] = maxn - minn + 1
for key in ('mri_width', 'mri_height', 'mri_depth', 'mri_volume_name',
'vox_mri_t', 'mri_ras_t'):
if key in vol_info:
sp[key] = vol_info[key]
_print_coord_trans(sps[0]['src_mri_t'], 'Source space : ')
for key in ('vox_mri_t', 'mri_ras_t'):
if key in sps[0]:
_print_coord_trans(sps[0][key], 'MRI volume : ')
return sps
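# An illustrative sketch (not part of the original module): the grid index
# bounds computed above round outward from the surface extent in units of
# the grid spacing. For a hypothetical extent of -33 mm .. +41 mm on a
# 5 mm grid:
def _demo_grid_bounds(grid=0.005):
    import numpy as np
    mins, maxs = np.array([-0.033]), np.array([0.041])
    maxn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else
                     -np.floor(np.abs(m) / grid) - 1 for m in maxs], int)
    minn = np.array([np.floor(np.abs(m) / grid) + 1 if m > 0 else
                     -np.floor(np.abs(m) / grid) - 1 for m in mins], int)
    return minn, maxn  # -> (-7, 9), i.e. the grid covers -35 mm .. +45 mm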
def _vol_vertex(width, height, jj, kk, pp):
return jj + width * kk + pp * (width * height)
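# An illustrative sketch (not part of the original module): ``_vol_vertex``
# is Fortran-style raveling (x varies fastest, then y, then z), matching the
# grid construction in ``_make_volume_source_space``:
def _demo_vol_vertex():
    # in a grid of width 4 and height 3, voxel (j=1, k=2, p=0) -> 1 + 4 * 2
    return _vol_vertex(4, 3, 1, 2, 0)  # -> 9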
def _src_vol_dims(s):
w, h, d = [s[f'mri_{key}'] for key in ('width', 'height', 'depth')]
return w, h, d, np.prod([w, h, d])
def _add_interpolator(sp):
"""Compute a sparse matrix to interpolate the data into an MRI volume."""
# extract transformation information from mri
from scipy import sparse
mri_width, mri_height, mri_depth, nvox = _src_vol_dims(sp[0])
#
# Convert MRI voxels from destination (MRI volume) to source (volume
# source space subset) coordinates
#
combo_trans = combine_transforms(sp[0]['vox_mri_t'],
invert_transform(sp[0]['src_mri_t']),
'mri_voxel', 'mri_voxel')
logger.info('Setting up volume interpolation ...')
inuse = np.zeros(sp[0]['np'], bool)
for s_ in sp:
np.logical_or(inuse, s_['inuse'], out=inuse)
interp = _grid_interp(
sp[0]['vol_dims'], (mri_width, mri_height, mri_depth),
combo_trans['trans'], order=1, inuse=inuse)
assert isinstance(interp, sparse.csr_matrix)
# Compose the sparse matrices
for si, s in enumerate(sp):
if len(sp) == 1: # no need to do these gymnastics
this_interp = interp
        else:  # limit to rows that have any contribution from inuse
# This is the same as the following, but more efficient:
# any_ = np.asarray(
# interp[:, s['inuse'].astype(bool)].sum(1)
# )[:, 0].astype(bool)
any_ = np.zeros(interp.indices.size + 1, np.int64)
any_[1:] = s['inuse'][interp.indices]
np.cumsum(any_, out=any_)
any_ = np.diff(any_[interp.indptr]) > 0
assert any_.shape == (interp.shape[0],)
indptr = np.empty_like(interp.indptr)
indptr[0] = 0
indptr[1:] = np.diff(interp.indptr)
indptr[1:][~any_] = 0
np.cumsum(indptr, out=indptr)
mask = np.repeat(any_, np.diff(interp.indptr))
indices = interp.indices[mask]
data = interp.data[mask]
assert data.shape == indices.shape == (indptr[-1],)
this_interp = sparse.csr_matrix(
(data, indices, indptr), shape=interp.shape)
s['interpolator'] = this_interp
logger.info(' %d/%d nonzero values for %s'
% (len(s['interpolator'].data), nvox, s['seg_name']))
logger.info('[done]')
def _grid_interp(from_shape, to_shape, trans, order=1, inuse=None):
"""Compute a grid-to-grid linear or nearest interpolation given."""
from scipy import sparse
from_shape = np.array(from_shape, int)
to_shape = np.array(to_shape, int)
trans = np.array(trans, np.float64) # to -> from
assert trans.shape == (4, 4) and np.array_equal(trans[3], [0, 0, 0, 1])
assert from_shape.shape == to_shape.shape == (3,)
shape = (np.prod(to_shape), np.prod(from_shape))
if inuse is None:
inuse = np.ones(shape[1], bool)
assert inuse.dtype == bool
assert inuse.shape == (shape[1],)
data, indices, indptr = _grid_interp_jit(
from_shape, to_shape, trans, order, inuse)
data = np.concatenate(data)
indices = np.concatenate(indices)
indptr = np.cumsum(indptr)
interp = sparse.csr_matrix((data, indices, indptr), shape=shape)
return interp
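# An illustrative sketch (not part of the original module): with an identity
# transform and matching shapes, the nearest-neighbor interpolator reduces
# to the identity mapping between the two raveled grids:
def _demo_grid_interp_identity():
    import numpy as np
    interp = _grid_interp((2, 2, 2), (2, 2, 2), np.eye(4), order=0)
    vals = np.arange(8.)
    return np.allclose(interp @ vals, vals)  # -> True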
# This is all set up to do jit, but it's actually slower!
def _grid_interp_jit(from_shape, to_shape, trans, order, inuse):
# Loop over slices to save (lots of) memory
# Note that it is the slowest incrementing index
# This is equivalent to using mgrid and reshaping, but faster
assert order in (0, 1)
data = list()
indices = list()
nvox = np.prod(to_shape)
indptr = np.zeros(nvox + 1, np.int32)
mri_width, mri_height, mri_depth = to_shape
r0__ = np.empty((4, mri_height, mri_width), np.float64)
r0__[0, :, :] = np.arange(mri_width)
r0__[1, :, :] = np.arange(mri_height).reshape(1, mri_height, 1)
r0__[3, :, :] = 1
r0_ = np.reshape(r0__, (4, mri_width * mri_height))
width, height, _ = from_shape
trans = np.ascontiguousarray(trans)
maxs = (from_shape - 1).reshape(1, 3)
for p in range(mri_depth):
r0_[2] = p
# Transform our vertices from their MRI space into our source space's
# frame (this is labeled as FIFFV_MNE_COORD_MRI_VOXEL, but it's
# really a subset of the entire volume!)
r0 = (trans @ r0_)[:3].T
if order == 0:
rx = np.round(r0).astype(np.int32)
keep = np.where(np.logical_and(np.all(rx >= 0, axis=1),
np.all(rx <= maxs, axis=1)))[0]
indptr[keep + p * mri_height * mri_width + 1] = 1
indices.append(_vol_vertex(width, height, *rx[keep].T))
data.append(np.ones(len(keep)))
continue
rn = np.floor(r0).astype(np.int32)
good = np.where(np.logical_and(np.all(rn >= -1, axis=1),
np.all(rn <= maxs, axis=1)))[0]
if len(good) == 0:
continue
rns = rn[good]
r0s = r0[good]
jj_g, kk_g, pp_g = (rns >= 0).T
jjp1_g, kkp1_g, ppp1_g = (rns < maxs).T # same as rns + 1 <= maxs
# now we take each MRI voxel *in this space*, and figure out how
# to make its value the weighted sum of voxels in the volume source
# space. This is a trilinear interpolation based on the
# fact that we know we're interpolating from one volumetric grid
# into another.
jj = rns[:, 0]
kk = rns[:, 1]
pp = rns[:, 2]
vss = np.empty((len(jj), 8), np.int32)
jjp1 = jj + 1
kkp1 = kk + 1
ppp1 = pp + 1
mask = np.empty((len(jj), 8), bool)
vss[:, 0] = _vol_vertex(width, height, jj, kk, pp)
mask[:, 0] = jj_g & kk_g & pp_g
vss[:, 1] = _vol_vertex(width, height, jjp1, kk, pp)
mask[:, 1] = jjp1_g & kk_g & pp_g
vss[:, 2] = _vol_vertex(width, height, jjp1, kkp1, pp)
mask[:, 2] = jjp1_g & kkp1_g & pp_g
vss[:, 3] = _vol_vertex(width, height, jj, kkp1, pp)
mask[:, 3] = jj_g & kkp1_g & pp_g
vss[:, 4] = _vol_vertex(width, height, jj, kk, ppp1)
mask[:, 4] = jj_g & kk_g & ppp1_g
vss[:, 5] = _vol_vertex(width, height, jjp1, kk, ppp1)
mask[:, 5] = jjp1_g & kk_g & ppp1_g
vss[:, 6] = _vol_vertex(width, height, jjp1, kkp1, ppp1)
mask[:, 6] = jjp1_g & kkp1_g & ppp1_g
vss[:, 7] = _vol_vertex(width, height, jj, kkp1, ppp1)
mask[:, 7] = jj_g & kkp1_g & ppp1_g
# figure out weights for each vertex
xf = r0s[:, 0] - rns[:, 0].astype(np.float64)
yf = r0s[:, 1] - rns[:, 1].astype(np.float64)
zf = r0s[:, 2] - rns[:, 2].astype(np.float64)
omxf = 1.0 - xf
omyf = 1.0 - yf
omzf = 1.0 - zf
this_w = np.empty((len(good), 8), np.float64)
this_w[:, 0] = omxf * omyf * omzf
this_w[:, 1] = xf * omyf * omzf
this_w[:, 2] = xf * yf * omzf
this_w[:, 3] = omxf * yf * omzf
this_w[:, 4] = omxf * omyf * zf
this_w[:, 5] = xf * omyf * zf
this_w[:, 6] = xf * yf * zf
this_w[:, 7] = omxf * yf * zf
# eliminate zeros
mask[this_w <= 0] = False
# eliminate rows where none of inuse are actually present
row_mask = mask.copy()
row_mask[mask] = inuse[vss[mask]]
mask[~(row_mask.any(axis=-1))] = False
# construct the parts we need
indices.append(vss[mask])
indptr[good + p * mri_height * mri_width + 1] = mask.sum(1)
data.append(this_w[mask])
return data, indices, indptr
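# Standalone sketch of the trilinear weighting above (hypothetical input
# point): the eight corner weights are products of the fractional offsets
# along each axis and always sum to one.
def _trilinear_weights_demo(point):
    xf, yf, zf = np.asarray(point, float) - np.floor(point)
    omxf, omyf, omzf = 1. - xf, 1. - yf, 1. - zf
    w = np.array([omxf * omyf * omzf, xf * omyf * omzf, xf * yf * omzf,
                  omxf * yf * omzf, omxf * omyf * zf, xf * omyf * zf,
                  xf * yf * zf, omxf * yf * zf])
    assert np.isclose(w.sum(), 1.)  # partition of unity
    return w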
def _pts_in_hull(pts, hull, tolerance=1e-12):
return np.all([np.dot(eq[:-1], pts.T) + eq[-1] <= tolerance
for eq in hull.equations], axis=0)
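# Quick check of _pts_in_hull on a unit cube (illustrative only): scipy's
# ConvexHull stores facet equations [normal, offset] such that
# normal @ x + offset <= 0 holds for points inside the hull.
def _pts_in_hull_demo():
    from scipy.spatial import ConvexHull
    cube = np.array([[x, y, z] for x in (0, 1) for y in (0, 1)
                     for z in (0, 1)], float)
    hull = ConvexHull(cube)
    pts = np.array([[0.5, 0.5, 0.5], [2., 0., 0.]])
    assert _pts_in_hull(pts, hull).tolist() == [True, False]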
@verbose
def _filter_source_spaces(surf, limit, mri_head_t, src, n_jobs=1,
verbose=None):
"""Remove all source space points closer than a given limit (in mm)."""
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD and mri_head_t is None:
raise RuntimeError('Source spaces are in head coordinates and no '
'coordinate transform was provided!')
# How close are the source points to the surface?
out_str = 'Source spaces are in '
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
inv_trans = invert_transform(mri_head_t)
out_str += 'head coordinates.'
elif src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
out_str += 'MRI coordinates.'
else:
out_str += 'unknown (%d) coordinates.' % src[0]['coord_frame']
logger.info(out_str)
out_str = 'Checking that the sources are inside the surface'
if limit > 0.0:
out_str += ' and at least %6.1f mm away' % (limit)
logger.info(out_str + ' (will take a few...)')
# fit a sphere to a surf quickly
check_inside = _CheckInside(surf)
# Check that the source is inside surface (often the inner skull)
for s in src:
vertno = np.where(s['inuse'])[0] # can't trust s['vertno'] this deep
# Convert all points here first to save time
r1s = s['rr'][vertno]
if s['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
r1s = apply_trans(inv_trans['trans'], r1s)
inside = check_inside(r1s, n_jobs)
omit_outside = (~inside).sum()
# vectorized nearest using BallTree (or cdist)
omit_limit = 0
if limit > 0.0:
# only check "inside" points
idx = np.where(inside)[0]
check_r1s = r1s[idx]
if check_inside.inner_r is not None:
# ... and those that are at least inner_sphere + limit away
mask = (np.linalg.norm(check_r1s - check_inside.cm, axis=-1) >=
check_inside.inner_r - limit / 1000.)
idx = idx[mask]
check_r1s = check_r1s[mask]
dists = _compute_nearest(
surf['rr'], check_r1s, return_dists=True, method='cKDTree')[1]
close = (dists < limit / 1000.0)
omit_limit = np.sum(close)
inside[idx[close]] = False
s['inuse'][vertno[~inside]] = False
del vertno
s['nuse'] -= (omit_outside + omit_limit)
s['vertno'] = np.where(s['inuse'])[0]
if omit_outside > 0:
extras = [omit_outside]
extras += ['s', 'they are'] if omit_outside > 1 else ['', 'it is']
logger.info(' %d source space point%s omitted because %s '
'outside the inner skull surface.' % tuple(extras))
if omit_limit > 0:
extras = [omit_limit]
            extras += ['s'] if omit_limit > 1 else ['']
extras += [limit]
logger.info(' %d source space point%s omitted because of the '
'%6.1f-mm distance limit.' % tuple(extras))
# Adjust the patch inds as well if necessary
if omit_limit + omit_outside > 0:
_adjust_patch_info(s)
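# Sketch of the distance-limit check above using scipy directly
# (hypothetical arrays; the real code routes through _compute_nearest and
# _CheckInside): keep only points at least ``limit`` mm from the surface.
def _distance_limit_demo(surf_rr, src_rr, limit):
    from scipy.spatial import cKDTree
    dists, _ = cKDTree(surf_rr).query(src_rr)
    return dists >= limit / 1000.  # positions are in meters, limit in mm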
@verbose
def _adjust_patch_info(s, verbose=None):
"""Adjust patch information in place after vertex omission."""
if s.get('patch_inds') is not None:
if s['nearest'] is None:
# This shouldn't happen, but if it does, we can probably come
# up with a more clever solution
raise RuntimeError('Cannot adjust patch information properly, '
'please contact the mne-python developers')
_add_patch_info(s)
@verbose
def _ensure_src(src, kind=None, extra='', verbose=None):
"""Ensure we have a source space."""
_check_option(
'kind', kind, (None, 'surface', 'volume', 'mixed', 'discrete'))
msg = 'src must be a string or instance of SourceSpaces%s' % (extra,)
if _path_like(src):
src = str(src)
if not op.isfile(src):
raise IOError('Source space file "%s" not found' % src)
logger.info('Reading %s...' % src)
src = read_source_spaces(src, verbose=False)
if not isinstance(src, SourceSpaces):
raise ValueError('%s, got %s (type %s)' % (msg, src, type(src)))
if kind is not None:
if src.kind != kind and src.kind == 'mixed':
if kind == 'surface':
src = src[:2]
elif kind == 'volume':
src = src[2:]
if src.kind != kind:
raise ValueError('Source space must contain %s type, got '
'%s' % (kind, src.kind))
return src
def _ensure_src_subject(src, subject):
src_subject = src._subject
if subject is None:
subject = src_subject
if subject is None:
raise ValueError('source space is too old, subject must be '
'provided')
elif src_subject is not None and subject != src_subject:
raise ValueError('Mismatch between provided subject "%s" and subject '
'name "%s" in the source space'
% (subject, src_subject))
return subject
_DIST_WARN_LIMIT = 10242 # warn for anything larger than ICO-5
@verbose
def add_source_space_distances(src, dist_limit=np.inf, n_jobs=1, verbose=None):
"""Compute inter-source distances along the cortical surface.
This function will also try to add patch info for the source space.
    Patch information will only be added if ``dist_limit`` is large enough
    that every point on the surface lies within ``dist_limit`` of some
    source space point.
Parameters
----------
src : instance of SourceSpaces
The source spaces to compute distances for.
dist_limit : float
The upper limit of distances to include (in meters).
        Note: if ``0 < dist_limit < np.inf``, SciPy > 0.13 is required.
        If ``dist_limit == 0``, only patch (nearest vertex) information is
        added, which requires SciPy >= 1.3.
%(n_jobs)s
Ignored if ``dist_limit==0.``.
%(verbose)s
Returns
-------
src : instance of SourceSpaces
The original source spaces, with distance information added.
The distances are stored in src[n]['dist'].
Note: this function operates in-place.
Notes
-----
This function can be memory- and CPU-intensive. On a high-end machine
(2012) running 6 jobs in parallel, an ico-5 (10242 per hemi) source space
takes about 10 minutes to compute all distances (``dist_limit = np.inf``).
With ``dist_limit = 0.007``, computing distances takes about 1 minute.
We recommend computing distances once per source space and then saving
the source space to disk, as the computed distances will automatically be
stored along with the source space data for future use.
"""
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
n_jobs = check_n_jobs(n_jobs)
src = _ensure_src(src)
dist_limit = float(dist_limit)
if dist_limit < 0:
raise ValueError('dist_limit must be non-negative, got %s'
% (dist_limit,))
patch_only = (dist_limit == 0)
if patch_only and not check_version('scipy', '1.3'):
raise RuntimeError('scipy >= 1.3 is required to calculate patch '
'information only, consider upgrading SciPy or '
'using dist_limit=np.inf when running '
'add_source_space_distances')
if src.kind != 'surface':
raise RuntimeError('Currently all source spaces must be of surface '
'type')
parallel, p_fun, _ = parallel_func(_do_src_distances, n_jobs)
min_dists = list()
min_idxs = list()
msg = 'patch information' if patch_only else 'source space distances'
logger.info('Calculating %s (limit=%s mm)...' % (msg, 1000 * dist_limit))
max_n = max(s['nuse'] for s in src)
if not patch_only and max_n > _DIST_WARN_LIMIT:
warn('Computing distances for %d source space points (in one '
'hemisphere) will be very slow, consider using add_dist=False'
% (max_n,))
for s in src:
adjacency = mesh_dist(s['tris'], s['rr'])
if patch_only:
min_dist, _, min_idx = dijkstra(
adjacency, indices=s['vertno'],
min_only=True, return_predecessors=True)
min_dists.append(min_dist.astype(np.float32))
min_idxs.append(min_idx)
for key in ('dist', 'dist_limit'):
s[key] = None
else:
d = parallel(p_fun(adjacency, s['vertno'], r, dist_limit)
for r in np.array_split(np.arange(len(s['vertno'])),
n_jobs))
# deal with indexing so we can add patch info
min_idx = np.array([dd[1] for dd in d])
min_dist = np.array([dd[2] for dd in d])
midx = np.argmin(min_dist, axis=0)
range_idx = np.arange(len(s['rr']))
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
min_dists.append(min_dist)
min_idxs.append(min_idx)
# convert to sparse representation
d = np.concatenate([dd[0] for dd in d]).ravel() # already float32
idx = d > 0
d = d[idx]
i, j = np.meshgrid(s['vertno'], s['vertno'])
i = i.ravel()[idx]
j = j.ravel()[idx]
s['dist'] = csr_matrix(
(d, (i, j)), shape=(s['np'], s['np']), dtype=np.float32)
s['dist_limit'] = np.array([dist_limit], np.float32)
# Let's see if our distance was sufficient to allow for patch info
if not any(np.any(np.isinf(md)) for md in min_dists):
# Patch info can be added!
for s, min_dist, min_idx in zip(src, min_dists, min_idxs):
s['nearest'] = min_idx
s['nearest_dist'] = min_dist
_add_patch_info(s)
else:
logger.info('Not adding patch information, dist_limit too small')
return src
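# Typical usage sketch (hypothetical file names): compute distances once,
# then save the enriched source space for reuse, as the Notes recommend.
def _add_distances_demo():
    src = read_source_spaces('sample-oct6-src.fif')  # hypothetical path
    src = add_source_space_distances(src, dist_limit=0.007, n_jobs=2)
    src.save('sample-oct6-dist-src.fif', overwrite=True)
    return src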
def _do_src_distances(con, vertno, run_inds, limit):
"""Compute source space distances in chunks."""
from scipy.sparse.csgraph import dijkstra
func = partial(dijkstra, limit=limit)
chunk_size = 20 # save memory by chunking (only a little slower)
lims = np.r_[np.arange(0, len(run_inds), chunk_size), len(run_inds)]
n_chunks = len(lims) - 1
# eventually we want this in float32, so save memory by only storing 32-bit
d = np.empty((len(run_inds), len(vertno)), np.float32)
min_dist = np.empty((n_chunks, con.shape[0]))
min_idx = np.empty((n_chunks, con.shape[0]), np.int32)
range_idx = np.arange(con.shape[0])
for li, (l1, l2) in enumerate(zip(lims[:-1], lims[1:])):
idx = vertno[run_inds[l1:l2]]
out = func(con, indices=idx)
midx = np.argmin(out, axis=0)
min_idx[li] = idx[midx]
min_dist[li] = out[midx, range_idx]
d[l1:l2] = out[:, vertno]
midx = np.argmin(min_dist, axis=0)
min_dist = min_dist[midx, range_idx]
min_idx = min_idx[midx, range_idx]
d[d == np.inf] = 0 # scipy will give us np.inf for uncalc. distances
return d, min_idx, min_dist
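# Standalone sketch of the chunking idea above (illustrative graph): run
# dijkstra over small batches of seed vertices to bound peak memory, then
# stack the per-chunk distance rows.
def _chunked_dijkstra_demo(adjacency, seeds, chunk_size=20):
    from scipy.sparse.csgraph import dijkstra
    out = [dijkstra(adjacency, indices=seeds[i:i + chunk_size])
           for i in range(0, len(seeds), chunk_size)]
    return np.concatenate(out, axis=0)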
# XXX this should probably be deprecated because it returns surface Labels,
# and probably isn't the way to go moving forward
# XXX this also assumes that the first two source spaces are surf without
# checking, which might not be the case (could be all volumes)
@fill_doc
def get_volume_labels_from_src(src, subject, subjects_dir):
"""Return a list of Label of segmented volumes included in the src space.
Parameters
----------
src : instance of SourceSpaces
The source space containing the volume regions.
%(subject)s
subjects_dir : str
Freesurfer folder of the subjects.
Returns
-------
labels_aseg : list of Label
List of Label of segmented volumes included in src space.
"""
from . import Label
from ._freesurfer import get_volume_labels_from_aseg
# Read the aseg file
aseg_fname = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
all_labels_aseg = get_volume_labels_from_aseg(
aseg_fname, return_colors=True)
# Create a list of Label
if len(src) < 2:
raise ValueError('No vol src space in src')
if any(np.any(s['type'] != 'vol') for s in src[2:]):
raise ValueError('source spaces have to be of vol type')
labels_aseg = list()
for nr in range(2, len(src)):
vertices = src[nr]['vertno']
pos = src[nr]['rr'][src[nr]['vertno'], :]
roi_str = src[nr]['seg_name']
try:
ind = all_labels_aseg[0].index(roi_str)
color = np.array(all_labels_aseg[1][ind]) / 255
except ValueError:
            color = None  # ROI name not found in the aseg lookup table
if 'left' in roi_str.lower():
hemi = 'lh'
roi_str = roi_str.replace('Left-', '') + '-lh'
elif 'right' in roi_str.lower():
hemi = 'rh'
roi_str = roi_str.replace('Right-', '') + '-rh'
else:
hemi = 'both'
label = Label(vertices=vertices, pos=pos, hemi=hemi,
name=roi_str, color=color,
subject=subject)
labels_aseg.append(label)
return labels_aseg
def _get_hemi(s):
"""Get a hemisphere from a given source space."""
if s['type'] != 'surf':
raise RuntimeError('Only surface source spaces supported')
if s['id'] == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
return 'lh', 0, s['id']
elif s['id'] == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
return 'rh', 1, s['id']
else:
raise ValueError('unknown surface ID %s' % s['id'])
def _get_vertex_map_nn(fro_src, subject_from, subject_to, hemi, subjects_dir,
to_neighbor_tri=None):
"""Get a nearest-neigbor vertex match for a given hemi src.
The to_neighbor_tri can optionally be passed in to avoid recomputation
if it's already available.
"""
# adapted from mne_make_source_space.c, knowing accurate=False (i.e.
# nearest-neighbor mode should be used)
logger.info('Mapping %s %s -> %s (nearest neighbor)...'
% (hemi, subject_from, subject_to))
regs = [op.join(subjects_dir, s, 'surf', '%s.sphere.reg' % hemi)
for s in (subject_from, subject_to)]
reg_fro, reg_to = [read_surface(r, return_dict=True)[-1] for r in regs]
if to_neighbor_tri is not None:
reg_to['neighbor_tri'] = to_neighbor_tri
if 'neighbor_tri' not in reg_to:
reg_to['neighbor_tri'] = _triangle_neighbors(reg_to['tris'],
reg_to['np'])
morph_inuse = np.zeros(len(reg_to['rr']), int)
best = np.zeros(fro_src['np'], int)
ones = _compute_nearest(reg_to['rr'], reg_fro['rr'][fro_src['vertno']])
for v, one in zip(fro_src['vertno'], ones):
# if it were actually a proper morph map, we would do this, but since
# we know it's nearest neighbor list, we don't need to:
# this_mm = mm[v]
# one = this_mm.indices[this_mm.data.argmax()]
if morph_inuse[one]:
# Try the nearest neighbors
neigh = _get_surf_neighbors(reg_to, one) # on demand calc
was = one
one = neigh[np.where(~morph_inuse[neigh])[0]]
if len(one) == 0:
                raise RuntimeError('vertex %d would be used multiple times.'
                                   % was)
one = one[0]
logger.info('Source space vertex moved from %d to %d because of '
'double occupation.' % (was, one))
best[v] = one
morph_inuse[one] = True
return best
@verbose
def morph_source_spaces(src_from, subject_to, surf='white', subject_from=None,
subjects_dir=None, verbose=None):
"""Morph an existing source space to a different subject.
.. warning:: This can be used in place of morphing source estimates for
multiple subjects, but there may be consequences in terms
of dipole topology.
Parameters
----------
src_from : instance of SourceSpaces
Surface source spaces to morph.
subject_to : str
The destination subject.
surf : str
The brain surface to use for the new source space.
subject_from : str | None
The "from" subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
%(verbose)s
Returns
-------
src : instance of SourceSpaces
The morphed source spaces.
Notes
-----
.. versionadded:: 0.10.0
"""
# adapted from mne_make_source_space.c
src_from = _ensure_src(src_from)
subject_from = _ensure_src_subject(src_from, subject_from)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_out = list()
for fro in src_from:
hemi, idx, id_ = _get_hemi(fro)
to = op.join(subjects_dir, subject_to, 'surf', '%s.%s' % (hemi, surf,))
logger.info('Reading destination surface %s' % (to,))
to = read_surface(to, return_dict=True, verbose=False)[-1]
complete_surface_info(to, copy=False)
# Now we morph the vertices to the destination
# The C code does something like this, but with a nearest-neighbor
# mapping instead of the weighted one::
#
# >>> mm = read_morph_map(subject_from, subject_to, subjects_dir)
#
# Here we use a direct NN calculation, since picking the max from the
# existing morph map (which naively one might expect to be equivalent)
# differs for ~3% of vertices.
best = _get_vertex_map_nn(fro, subject_from, subject_to, hemi,
subjects_dir, to['neighbor_tri'])
for key in ('neighbor_tri', 'tri_area', 'tri_cent', 'tri_nn',
'use_tris'):
del to[key]
to['vertno'] = np.sort(best[fro['vertno']])
to['inuse'] = np.zeros(len(to['rr']), int)
to['inuse'][to['vertno']] = True
to['use_tris'] = best[fro['use_tris']]
to.update(nuse=len(to['vertno']), nuse_tri=len(to['use_tris']),
nearest=None, nearest_dist=None, patch_inds=None, pinfo=None,
dist=None, id=id_, dist_limit=None, type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI, subject_his_id=subject_to,
rr=to['rr'] / 1000.)
src_out.append(to)
logger.info('[done]\n')
info = dict(working_dir=os.getcwd(), command_line=_get_call_line())
return SourceSpaces(src_out, info=info)
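# Usage sketch (hypothetical file name and subjects): morph a surface
# source space defined on fsaverage onto an individual subject's white
# surface.
def _morph_src_demo():
    src_fs = _ensure_src('fsaverage-ico5-src.fif')  # hypothetical path
    return morph_source_spaces(src_fs, subject_to='sample', surf='white')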
@verbose
def _get_morph_src_reordering(vertices, src_from, subject_from, subject_to,
subjects_dir=None, verbose=None):
"""Get the reordering indices for a morphed source space.
Parameters
----------
vertices : list
The vertices for the left and right hemispheres.
src_from : instance of SourceSpaces
The original source space.
subject_from : str
The source subject.
subject_to : str
The destination subject.
%(subjects_dir)s
%(verbose)s
Returns
-------
data_idx : ndarray, shape (n_vertices,)
The array used to reshape the data.
from_vertices : list
        The left and right hemisphere vertex numbers for the "from" subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
from_vertices = list()
data_idxs = list()
offset = 0
for ii, hemi in enumerate(('lh', 'rh')):
# Get the mapping from the original source space to the destination
# subject's surface vertex numbers
best = _get_vertex_map_nn(src_from[ii], subject_from, subject_to,
hemi, subjects_dir)
full_mapping = best[src_from[ii]['vertno']]
# Tragically, we might not have all of our vertno left (e.g. because
# some are omitted during fwd calc), so we must do some indexing magic:
# From all vertices, a subset could be chosen by fwd calc:
used_vertices = np.in1d(full_mapping, vertices[ii])
from_vertices.append(src_from[ii]['vertno'][used_vertices])
remaining_mapping = full_mapping[used_vertices]
if not np.array_equal(np.sort(remaining_mapping), vertices[ii]) or \
not np.in1d(vertices[ii], full_mapping).all():
raise RuntimeError('Could not map vertices, perhaps the wrong '
'subject "%s" was provided?' % subject_from)
# And our data have been implicitly remapped by the forced ascending
# vertno order in source spaces
implicit_mapping = np.argsort(remaining_mapping) # happens to data
data_idx = np.argsort(implicit_mapping) # to reverse the mapping
data_idx += offset # hemisphere offset
data_idxs.append(data_idx)
offset += len(implicit_mapping)
data_idx = np.concatenate(data_idxs)
# this one is really just a sanity check for us, should never be violated
# by users
assert np.array_equal(np.sort(data_idx),
np.arange(sum(len(v) for v in vertices)))
return data_idx, from_vertices
def _compare_source_spaces(src0, src1, mode='exact', nearest=True,
dist_tol=1.5e-3):
"""Compare two source spaces.
Note: this function is also used by forward/tests/test_make_forward.py
"""
from numpy.testing import (assert_allclose, assert_array_equal,
assert_equal, assert_, assert_array_less)
from scipy.spatial.distance import cdist
if mode != 'exact' and 'approx' not in mode: # 'nointerp' can be appended
raise RuntimeError('unknown mode %s' % mode)
for si, (s0, s1) in enumerate(zip(src0, src1)):
# first check the keys
a, b = set(s0.keys()), set(s1.keys())
assert_equal(a, b, str(a ^ b))
for name in ['nuse', 'ntri', 'np', 'type', 'id']:
a, b = s0[name], s1[name]
if name == 'id': # workaround for old NumPy bug
a, b = int(a), int(b)
assert_equal(a, b, name)
for name in ['subject_his_id']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
for name in ['interpolator']:
if name in s0 or name in s1:
assert name in s0, f'{name} in s1 but not s0'
                assert name in s1, f'{name} in s0 but not s1'
n = np.prod(s0['interpolator'].shape)
diffs = (s0['interpolator'] - s1['interpolator']).data
if len(diffs) > 0 and 'nointerp' not in mode:
# 0.1%
assert_array_less(
np.sqrt(np.sum(diffs * diffs) / n), 0.001,
err_msg=f'{name} > 0.1%')
for name in ['nn', 'rr', 'nuse_tri', 'coord_frame', 'tris']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
if mode == 'exact':
assert_array_equal(s0[name], s1[name], name)
else: # 'approx' in mode
atol = 1e-3 if name == 'nn' else 1e-4
assert_allclose(s0[name], s1[name], rtol=1e-3, atol=atol,
err_msg=name)
for name in ['seg_name']:
if name in s0 or name in s1:
assert_equal(s0[name], s1[name], name)
# these fields will exist if patch info was added
if nearest:
for name in ['nearest', 'nearest_dist', 'patch_inds']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
atol = 0 if mode == 'exact' else 1e-6
assert_allclose(s0[name], s1[name],
atol=atol, err_msg=name)
for name in ['pinfo']:
if s0[name] is None:
assert_(s1[name] is None, name)
else:
assert_(len(s0[name]) == len(s1[name]), name)
for p1, p2 in zip(s0[name], s1[name]):
assert_(all(p1 == p2), name)
if mode == 'exact':
for name in ['inuse', 'vertno', 'use_tris']:
assert_array_equal(s0[name], s1[name], err_msg=name)
for name in ['dist_limit']:
assert_(s0[name] == s1[name], name)
for name in ['dist']:
if s0[name] is not None:
assert_equal(s1[name].shape, s0[name].shape)
assert_(len((s0['dist'] - s1['dist']).data) == 0)
else: # 'approx' in mode:
# deal with vertno, inuse, and use_tris carefully
for ii, s in enumerate((s0, s1)):
assert_array_equal(s['vertno'], np.where(s['inuse'])[0],
'src%s[%s]["vertno"] != '
'np.where(src%s[%s]["inuse"])[0]'
% (ii, si, ii, si))
assert_equal(len(s0['vertno']), len(s1['vertno']))
agreement = np.mean(s0['inuse'] == s1['inuse'])
assert_(agreement >= 0.99, "%s < 0.99" % agreement)
if agreement < 1.0:
# make sure mismatched vertno are within 1.5mm
v0 = np.setdiff1d(s0['vertno'], s1['vertno'])
v1 = np.setdiff1d(s1['vertno'], s0['vertno'])
dists = cdist(s0['rr'][v0], s1['rr'][v1])
assert_allclose(np.min(dists, axis=1), np.zeros(len(v0)),
atol=dist_tol, err_msg='mismatched vertno')
if s0['use_tris'] is not None: # for "spacing"
assert_array_equal(s0['use_tris'].shape, s1['use_tris'].shape)
else:
assert_(s1['use_tris'] is None)
assert_(np.mean(s0['use_tris'] == s1['use_tris']) > 0.99)
# The above "if s0[name] is not None" can be removed once the sample
# dataset is updated to have a source space with distance info
for name in ['working_dir', 'command_line']:
if mode == 'exact':
assert_equal(src0.info[name], src1.info[name])
else: # 'approx' in mode:
if name in src0.info:
assert_(name in src1.info, '"%s" missing' % name)
else:
assert_(name not in src1.info, '"%s" should not exist' % name)
def _set_source_space_vertices(src, vertices):
"""Reset the list of source space vertices."""
assert len(src) == len(vertices)
for s, v in zip(src, vertices):
s['inuse'].fill(0)
s['nuse'] = len(v)
s['vertno'] = np.array(v)
s['inuse'][s['vertno']] = 1
s['use_tris'] = np.array([[]], int)
s['nuse_tri'] = np.array([0])
# This will fix 'patch_info' and 'pinfo'
_adjust_patch_info(s, verbose=False)
return src
def _get_src_nn(s, use_cps=True, vertices=None):
vertices = s['vertno'] if vertices is None else vertices
if use_cps and s.get('patch_inds') is not None:
nn = np.empty((len(vertices), 3))
for vp, p in enumerate(np.searchsorted(s['vertno'], vertices)):
assert(s['vertno'][p] == vertices[vp])
            # average the normals over the patch, then renormalize below
nn[vp] = np.sum(
s['nn'][s['pinfo'][s['patch_inds'][p]], :], axis=0)
nn /= np.linalg.norm(nn, axis=-1, keepdims=True)
else:
nn = s['nn'][vertices, :]
return nn
@verbose
def compute_distance_to_sensors(src, info, picks=None, trans=None,
verbose=None):
"""Compute distances between vertices and sensors.
Parameters
----------
src : instance of SourceSpaces
The object with vertex positions for which to compute distances to
sensors.
%(info)s Must contain sensor positions to which distances shall
be computed.
%(picks_good_data)s
%(trans_not_none)s
%(verbose)s
Returns
-------
depth : array of shape (n_vertices, n_channels)
The Euclidean distances of source space vertices with respect to
sensors.
"""
from scipy.spatial.distance import cdist
assert isinstance(src, SourceSpaces)
_validate_type(info, (Info,), 'info')
# Load the head<->MRI transform if necessary
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
src_trans, _ = _get_trans(trans, allow_none=False)
else:
src_trans = Transform('head', 'head') # Identity transform
# get vertex position in same coordinates as for sensors below
src_pos = np.vstack([
        apply_trans(src_trans, s['rr'][s['inuse'].astype(bool)])
for s in src
])
# Select channels to be used for distance calculations
picks = _picks_to_idx(info, picks, 'data', exclude=())
# get sensor positions
sensor_pos = []
dev_to_head = None
for ch in picks:
# MEG channels are in device coordinates, translate them to head
if channel_type(info, ch) in ['mag', 'grad']:
if dev_to_head is None:
dev_to_head = _ensure_trans(info['dev_head_t'],
'meg', 'head')
sensor_pos.append(apply_trans(dev_to_head,
info['chs'][ch]['loc'][:3]))
else:
sensor_pos.append(info['chs'][ch]['loc'][:3])
sensor_pos = np.array(sensor_pos)
depths = cdist(src_pos, sensor_pos)
return depths
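# Usage sketch (hypothetical transform file): per-vertex distance to the
# closest good data channel, e.g. as a crude depth measure.
def _depth_demo(src, info):
    depths = compute_distance_to_sensors(src, info,
                                         trans='sample-trans.fif')
    return depths.min(axis=1)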
|
pravsripad/mne-python
|
mne/source_space.py
|
Python
|
bsd-3-clause
| 111,172
|
[
"Mayavi"
] |
b6f1c3fa79b8cc3d6f32f7779b3182e03793382f44022482e04a7d7ccd201203
|
# Copyright (C) 2014 MediaMath, Inc. <http://www.mediamath.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite_backend as sql_backend
import table_merger
import util
import qasino_table
import logging
import time
import re
import yaml
import sys
import thread
from twisted.internet import threads
from twisted.internet import task
from twisted.internet import reactor
class DataManager(object):
def __init__(self, use_dbfile, db_dir=None, signal_channel=None, archive_db_dir=None,
generation_duration_s=30):
self.saved_tables = {}
self.query_id = 0
self.views = {}
self.thread_id = thread.get_ident()
self.stats = {}
self.generation_duration_s = generation_duration_s
self.signal_channel = signal_channel
self.archive_db_dir = archive_db_dir
self.static_db_filepath = db_dir + '/qasino_table_store_static.db'
# Start with zero because we'll call rotate_dbs instantly below.
self.db_generation_number = 0
        # use_dbfile can be:
        #   None            -> default template 'qasino_table_store_%d.db'
        #   'memory'        -> use an in-memory db
        #   contains '%d'   -> use the template filename (new db per generation)
        #   no '%d'         -> use the filename as-is (same db every generation)
self.one_db = False
self.db_name = use_dbfile
if use_dbfile == None:
self.db_name = "qasino_table_store_%d.db"
elif use_dbfile == "memory":
self.db_name = ":memory:"
self.one_db = True
elif use_dbfile.find('%d') == -1:
self.one_db = True
# Initialize some things
self.table_merger = table_merger.TableMerger(self)
# Add db_dir path
if db_dir != None and self.db_name != ":memory:":
self.db_name = db_dir + '/' + self.db_name
# Open the writer backend db.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_reader = None
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
self.sql_backend_writer_static = sql_backend.SqlConnections(self.static_db_filepath,
self,
self.archive_db_dir,
self.thread_id,
None)
# Make the data manager db rotation run at fixed intervals.
# This will also immediately make the call which will make the
# writer we just opened the reader and to open a new writer.
self.rotate_task = task.LoopingCall(self.async_rotate_dbs)
self.rotate_task.start(self.generation_duration_s)
def read_views(self, filename):
# Reset views
self.views = {}
try:
fh = open(filename, "r")
except Exception as e:
logging.info("Failed to open views file '%s': %s", filename, e)
return
try:
            view_conf_obj = yaml.safe_load(fh)  # safe_load: the views file is plain data, not arbitrary YAML objects
except Exception as e:
logging.info("Failed to parse view conf yaml file '%s': %s", filename, e)
return
for view in view_conf_obj:
try:
viewname = view["viewname"]
view = view["view"]
self.views[viewname] = { 'view' : view, 'loaded' : False, 'error' : '' }
except Exception as e:
logging.info("Failure getting view '%s': %s", view["viewname"] if "viewname" in view else 'unknown', e)
def get_query_id(self):
self.query_id += 1
return self.query_id
def shutdown(self):
self.rotate_task = None
self.sql_backend_reader = None
self.sql_backend_writer = None
def async_validate_and_route_query(self, sql, query_id, use_write_db=False):
if use_write_db:
return self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_writer)
else:
return self.sql_backend_reader.run_interaction(sql_backend.SqlConnections.READER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_reader)
def validate_and_route_query(self, txn, sql, query_id, sql_backend):
# So when dbs rotate we'll force a shutdown of the backend
# after a certain amount of time to avoid hung or long running
# things in this code path from holding dbs open. This
# may/will invalidate references we might have in here so wrap
# it all in a try catch...
try:
m = re.search(r"^\s*select\s+", sql, flags=re.IGNORECASE)
if m == None:
# Process a non-select statement.
return self.process_non_select(txn, sql, query_id, sql_backend)
# Process a select statement.
return sql_backend.do_select(txn, sql)
except Exception as e:
msg = "Exception in validate_and_route_query: {}".format(str(e))
logging.info(msg)
return { "retval" : 0, "error_message" : msg }
def process_non_select(self, txn, sql, query_id, sql_backend):
"""
Called for non-select statements like show tables and desc.
"""
# DESC?
m = re.search(r"^\s*desc\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
(retval, error_message, table) = sql_backend.do_desc(txn, m.group(1))
result = { "retval" : retval }
if error_message:
result["error_message"] = error_message
if table:
result["data"] = table
return result
# DESC VIEW?
m = re.search(r"^\s*desc\s+view\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT view FROM qasino_server_views WHERE viewname = '%s';" % m.group(1))
# SHOW tables?
m = re.search(r"^\s*show\s+tables\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables ORDER BY tablename;")
# SHOW tables with LIKE?
m = re.search(r"^\s*show\s+tables\s+like\s+('\S+')\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables WHERE tablename LIKE {} ORDER BY tablename;".format(m.group(1)) )
# SHOW connections?
m = re.search(r"^\s*show\s+connections\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_connections ORDER BY identity;")
# SHOW info?
m = re.search(r"^\s*show\s+info\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', generation_start_epoch, 'unixepoch') generation_start_datetime FROM qasino_server_info;")
# SHOW views?
m = re.search(r"^\s*show\s+views\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT viewname, loaded, errormsg FROM qasino_server_views ORDER BY viewname;")
# Exit?
m = re.search(r"^\s*(quit|logout|exit)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return { "retval" : 0, "error_message" : "Bye!" }
return { "retval" : 1, "error_message" : "ERROR: Unrecognized statement: %s" % sql }
def get_table_list(self):
return self.sql_backend_reader.tables
def insert_tables_table(self, txn, sql_backend_writer, sql_backend_writer_static):
table = qasino_table.QasinoTable("qasino_server_tables")
table.add_column("tablename", "varchar")
table.add_column("nr_rows", "int")
table.add_column("nr_updates", "int")
table.add_column("last_update_epoch", "int")
table.add_column("static", "int")
sql_backend_writer.add_tables_table_rows(table)
sql_backend_writer_static.add_tables_table_rows(table)
# the chicken or the egg - how do we add ourselves?
table.add_row( [ "qasino_server_tables",
table.get_nr_rows() + 1,
1,
time.time(),
0 ] )
return sql_backend_writer.add_table_data(txn, table, util.Identity.get_identity())
    # This hack ensures all the internal tables are inserted
# using the same sql_backend_writer and makes sure that the
# "tables" table is called last (after all the other internal
# tables are added).
def insert_internal_tables(self, txn, sql_backend_writer, sql_backend_reader, db_generation_number, time, generation_duration_s, views):
sql_backend_writer.insert_info_table(txn, db_generation_number, time, generation_duration_s)
sql_backend_writer.insert_connections_table(txn)
if sql_backend_reader != None:
sql_backend_writer.insert_sql_stats_table(txn, sql_backend_reader)
sql_backend_writer.insert_update_stats_table(txn)
# this should be second last so views can be created of any tables above.
# this means though that you can not create views of any tables below.
sql_backend_writer.add_views(txn, views)
sql_backend_writer.insert_views_table(txn, views)
# this should be last to include all the above tables
self.insert_tables_table(txn, sql_backend_writer, self.sql_backend_writer_static)
def async_rotate_dbs(self):
"""
Kick off the rotate in a sqlconnection context because we have
some internal tables and views to add before we rotate dbs.
"""
self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION, self.rotate_dbs)
def rotate_dbs(self, txn):
"""
Make the db being written to be the reader db.
Open a new writer db for all new updates.
"""
logging.info("**** DataManager: Starting generation %d", self.db_generation_number)
# Before making the write db the read db,
# add various internal info tables and views.
self.insert_internal_tables(txn,
self.sql_backend_writer,
self.sql_backend_reader,
self.db_generation_number,
time.time(),
self.generation_duration_s,
self.views)
# Increment the generation number.
self.db_generation_number = int(time.time())
# Set the writer to a new db
save_sql_backend_writer = self.sql_backend_writer
# If specified put the generation number in the db name.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
# Set the reader to what was the writer
# Note the reader will (should) be deconstructed here.
# Just in case something else is holding a ref to the reader
# (indefinitely!?) force a shutdown of this backend after a
# certain amount of time though.
if self.sql_backend_reader:
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.writer_dbpool,
self.sql_backend_reader.filename,
None)
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.reader_dbpool,
self.sql_backend_reader.filename,
self.sql_backend_reader.archive_db_dir)
self.sql_backend_reader = save_sql_backend_writer
# Load saved tables.
self.async_add_saved_tables()
# Lastly blast out the generation number.
if self.signal_channel != None:
self.signal_channel.send_generation_signal(self.db_generation_number, self.generation_duration_s)
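    # Scheduling sketch (restating __init__ above): LoopingCall invokes the
    # callback immediately and then once per interval, which is why
    # db_generation_number can start at zero:
    #
    #   loop = task.LoopingCall(self.async_rotate_dbs)
    #   loop.start(self.generation_duration_s)  # first rotation runs now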
def check_save_table(self, table, identity):
tablename = table.get_tablename()
key = tablename + identity
if table.get_property('persist'):
self.saved_tables[key] = { "table" : table, "tablename" : tablename, "identity" : identity }
else:
# Be sure to remove a table that is no longer persisting.
if key in self.saved_tables:
del self.saved_tables[key]
def async_add_saved_tables(self):
for key, table_data in self.saved_tables.iteritems():
logging.info("DataManager: Adding saved table '%s' from '%s'", table_data["tablename"], table_data["identity"])
self.sql_backend_writer.async_add_table_data(table_data["table"], table_data["identity"])
|
MediaMath/qasino
|
lib/data_manager.py
|
Python
|
apache-2.0
| 15,026
|
[
"BLAST"
] |
117d5ca8ec3ae577b04b2c2c2e78c41750ce60d1760c0e3cc7e62d83aabfdb4e
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_webapi import add_cname_to_hepname_record
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, CFG_SITE_NAME, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import (getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.access_control_admin import acc_get_user_roles
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_doi, get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author
from invenio.webauthorprofile_orcidutils import get_dois_from_orcid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/generate_autoclaim_data
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'generate_autoclaim_data',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if not component in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1 # -1 is a non valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
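    # Illustrative constructor inputs (values from the docstring above):
    #   WebInterfaceBibAuthorIDClaimPages('14')             # author id
    #   WebInterfaceBibAuthorIDClaimPages('J.R.Ellis.1')    # canonical id
    #   WebInterfaceBibAuthorIDClaimPages('100:1442,155')   # bibrefrec
    # Anything that cannot be resolved leaves self.person_id at -1.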
def __call__(self, req, form):
'''
Serve the main person page.
Will use the object's person id to get a person's information.
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: a full page formatted in HTML
@rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'open_claim': (str, None),
'ticketid': (int, -1),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
req.argd = argd # needed for perform_req_search
if self.person_id < 0:
return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))
no_access = self._page_access_permission_wall(req, [self.person_id])
if no_access:
return no_access
pinfo['claim_in_process'] = True
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
session.dirty = True
if self.person_id != -1:
pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id
rt_ticket_id = argd['ticketid']
if rt_ticket_id != -1:
pinfo["admin_requested_ticket_id"] = rt_ticket_id
session.dirty = True
## Create menu and page using templates
cname = webapi.get_canonical_id_from_person_id(self.person_id)
menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
profile_page.add_profile_menu(menu)
gboxstatus = self.person_id
gpid = self.person_id
gNumOfWorkers = 3 # to do: read it from conf file
gReqTimeout = 3000
gPageTimeout = 12000
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s", guestPrompt: true});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
# content += self._generate_person_info_box(ulevel, ln) #### Name variants
# metaheaderadd = self._scripts() + '\n <meta name="robots" content="nofollow" />'
# body = self._generate_optional_menu(ulevel, req, form)
content = self._generate_tabs(ulevel, req)
content += self._generate_footer(ulevel)
content = content.decode('utf-8', 'strict')
webapi.history_log_visit(req, 'claim', pid=self.person_id)
return page(title=self._generate_title(ulevel),
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=profile_page.get_wrapped_body(content).encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
'''
Display an error page if user not authorized to use the interface.
@param req: Apache Request Object for session management
@type req: Apache Request Object
@param req_pid: Requested person id
@type req_pid: int
@param req_level: Request level required for the page
@type req_level: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
uinfo = collect_user_info(req)
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
is_authorized = True
pids_to_check = []
if not AID_ENABLED:
return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
if req_pid and not isinstance(req_pid, list):
pids_to_check = [req_pid]
elif req_pid and isinstance(req_pid, list):
pids_to_check = req_pid
if (not (uinfo['precached_usepaperclaim']
or uinfo['precached_usepaperattribution'])
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
if is_authorized and not webapi.user_can_view_CMP(uid):
is_authorized = False
if is_authorized and 'ticket' in pinfo:
for tic in pinfo["ticket"]:
if 'pid' in tic:
pids_to_check.append(tic['pid'])
if pids_to_check and is_authorized:
user_pid = webapi.get_pid_from_uid(uid)
if not uinfo['precached_usepaperattribution']:
if (not user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
elif (user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
for tic in list(pinfo["ticket"]):
if not tic["pid"] == user_pid:
pinfo['ticket'].remove(tic)
if not is_authorized:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
else:
return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_optional_menu(self, ulevel, req, form):
'''
Generates the menu for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: menu
@rtype: str
'''
def generate_optional_menu_guest(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_user(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_admin(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu_admin(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
generate_optional_menu = {'guest': generate_optional_menu_guest,
'user': generate_optional_menu_user,
'admin': generate_optional_menu_admin}
return "<div class=\"clearfix\">" + generate_optional_menu[ulevel](req, form) + "</div>"
def _generate_ticket_box(self, ulevel, req):
'''
Generates the semi-permanent info box for the specified user permission
level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: info box
@rtype: str
'''
def generate_ticket_box_guest(req):
session = get_session(req)
pinfo = session['personinfo']
ticket = pinfo['ticket']
results = list()
pendingt = list()
for t in ticket:
if 'execution_result' in t:
for res in t['execution_result']:
results.append(res)
else:
pendingt.append(t)
box = ""
if pendingt:
box += TEMPLATE.tmpl_ticket_box('in_process', 'transaction', len(pendingt))
if results:
failed = [messages for status, messages in results if not status]
if failed:
box += TEMPLATE.tmpl_transaction_box('failure', failed)
successfull = [messages for status, messages in results if status]
if successfull:
box += TEMPLATE.tmpl_transaction_box('success', successfull)
return box
def generate_ticket_box_user(req):
return generate_ticket_box_guest(req)
def generate_ticket_box_admin(req):
return generate_ticket_box_guest(req)
generate_ticket_box = {'guest': generate_ticket_box_guest,
'user': generate_ticket_box_user,
'admin': generate_ticket_box_admin}
return generate_ticket_box[ulevel](req)
def _generate_person_info_box(self, ulevel, ln):
'''
Generates the name info box for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param ln: page display language
@type ln: str
@return: name info box
@rtype: str
'''
def generate_person_info_box_guest(ln):
names = webapi.get_person_names_from_id(self.person_id)
box = TEMPLATE.tmpl_admin_person_info_box(ln, person_id=self.person_id,
names=names)
return box
def generate_person_info_box_user(ln):
return generate_person_info_box_guest(ln)
def generate_person_info_box_admin(ln):
return generate_person_info_box_guest(ln)
generate_person_info_box = {'guest': generate_person_info_box_guest,
'user': generate_person_info_box_user,
'admin': generate_person_info_box_admin}
return generate_person_info_box[ulevel](ln)
def _generate_tabs(self, ulevel, req):
'''
Generates the tabs content for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: tabs content
@rtype: str
'''
from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict
def generate_tabs_guest(req):
links = list() # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=list(),
verbiage_dict=tmpl_verbiage_dict['guest'],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
show_reset_button=False)
def generate_tabs_user(req):
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
user_is_owner = 'not_owner'
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
user_is_owner = 'owner'
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = list()
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=tickets,
verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])
def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
session = get_session(req)
personinfo = dict()
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
records = [{'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]} for paper in all_papers]
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
if open_tickets == None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1 and 'tickets' in show_tabs:
show_tabs.remove('tickets')
rt_tickets = None
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
if verbiage_dict is None:
verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
if buttons_verbiage_dict is None:
buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)
# send data to the template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def translate_dict_values(dictionary, ln):
def translate_str_values(dictionary, f=lambda x: x):
translated_dict = dict()
for key, value in dictionary.iteritems():
if isinstance(value, str):
translated_dict[key] = f(value)
elif isinstance(value, dict):
translated_dict[key] = translate_str_values(value, f)
else:
raise TypeError("Value should be either string or dictionary.")
return translated_dict
return translate_str_values(dictionary, f=gettext_set_language(ln))
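        # Illustrative sketch (assumed dict shape, not executed):
        #   translate_dict_values({'title': 'Tickets',
        #                          'buttons': {'ok': 'Confirm'}}, 'de')
        # maps every string leaf through gettext_set_language('de') while
        # keeping the nesting and the keys untouched.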
generate_tabs = {'guest': generate_tabs_guest,
'user': generate_tabs_user,
'admin': generate_tabs_admin}
return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
'''
Generates the footer for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: footer
@rtype: str
'''
def generate_footer_guest():
return TEMPLATE.tmpl_invenio_search_box()
def generate_footer_user():
return generate_footer_guest()
def generate_footer_admin():
return generate_footer_guest()
generate_footer = {'guest': generate_footer_guest,
'user': generate_footer_user,
'admin': generate_footer_admin}
return generate_footer[ulevel]()
def _ticket_dispatch_end(self, req):
'''
        The ticket dispatch is finished: redirect to the page of origin, to
        the last viewed pid, or return to the autoclaim papers box so it can
        populate its data.
'''
session = get_session(req)
pinfo = session["personinfo"]
webapi.session_bareinit(req)
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
if "merge_ticket" in pinfo and pinfo['merge_ticket']:
pinfo['merge_ticket'] = []
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = True
session.dirty = True
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.dirty = True
return redirect_to_url(req, referer)
        # if we are coming from the autoclaim box we should not redirect; just return to the caller function
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
session.dirty = True
else:
redirect_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
if not redirect_page:
redirect_page = webapi.get_fallback_redirect_link(req)
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == True and pinfo['autoclaim']['checkout'] == True:
redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
pinfo['autoclaim']['checkout'] = False
session.dirty = True
            elif 'manage_profile' not in redirect_page:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
redirect_page = '%s/author/claim/%s?open_claim=True' % (CFG_SITE_URL, webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
else:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
return redirect_to_url(req, redirect_page)
# redirect_link = diary('get_redirect_link', caller='_ticket_dispatch_end', parameters=[('open_claim','True')])
# return redirect_to_url(req, redirect_link)
# need review if should be deleted
def __user_is_authorized(self, req, action):
'''
Determines if a given user is authorized to perform a specified action
@param req: Apache Request Object
@type req: Apache Request Object
@param action: the action the user wants to perform
@type action: string
@return: True if user is allowed to perform the action, False if not
@rtype: boolean
'''
if not req:
return False
if not action:
return False
else:
action = escape(action)
uid = getUid(req)
if not isinstance(uid, int):
return False
if uid == 0:
return False
allowance = [i[1] for i in acc_find_user_role_actions({'uid': uid})
if i[1] == action]
if allowance:
return True
return False
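    # Authorization sketch (assumed row shape): acc_find_user_role_actions is
    # expected to yield (role, action) pairs, e.g.
    #   [('claimpaperoperators', 'claimpaper_claim_others_papers')]
    # so __user_is_authorized(req, 'claimpaper_claim_others_papers') is True
    # exactly when that action appears among the user's role actions.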
@staticmethod
def _scripts(kill_browser_cache=False):
'''
Returns html code to be included in the meta header of the html page.
The actual code is stored in the template.
@return: html formatted Javascript and CSS inclusions for the <head>
@rtype: string
'''
return TEMPLATE.tmpl_meta_includes(kill_browser_cache)
def _check_user_fields(self, req, form):
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None)})
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
skip_checkout_faulty_fields = False
if ulevel in ['user', 'admin']:
skip_checkout_faulty_fields = True
if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
if "user_first_name" in argd and argd['user_first_name']:
if not argd["user_first_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_first_name")
else:
pinfo["user_first_name"] = escape(argd["user_first_name"])
if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
if "user_last_name" in argd and argd['user_last_name']:
if not argd["user_last_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_last_name")
else:
pinfo["user_last_name"] = escape(argd["user_last_name"])
if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
if "user_email" in argd and argd['user_email']:
if not email_valid_p(argd["user_email"]):
pinfo["checkout_faulty_fields"].append("user_email")
else:
pinfo["user_email"] = escape(argd["user_email"])
if (ulevel == "guest"
and emailUnique(argd["user_email"]) > 0):
pinfo["checkout_faulty_fields"].append("user_email_taken")
else:
pinfo["checkout_faulty_fields"].append("user_email")
if "user_comments" in argd:
if argd["user_comments"]:
pinfo["user_ticket_comments"] = escape(argd["user_comments"])
else:
pinfo["user_ticket_comments"] = ""
session.dirty = True
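    # After _check_user_fields runs, pinfo['checkout_faulty_fields'] may hold
    # entries such as 'user_email' (invalid address) or 'user_email_taken'
    # (a guest supplied an address that is already registered);
    # checkout_submit refuses to confirm while the list is non-empty.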
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
- bibref_check_submit:
- cancel: clean the session (erase tickets and so on)
- cancel_rt_ticket:
- cancel_search_ticket:
- cancel_stage:
- checkout:
- checkout_continue_claiming:
- checkout_remove_transaction:
- checkout_submit:
- claim: claim papers for an author
- commit_rt_ticket:
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
        @rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review':(str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile':(str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
            # at most one action is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
        # papers with incomplete info or other problems trigger the action
        # function without the user's interference, so that those problems can
        # be fixed and the papers claimed or removed from the ticket
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
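        # Dispatch sketch (hypothetical request): a POST to
        # /author/claim/action with form fields
        #   {'confirm': 'True', 'selection': ['100:1234,5678'], 'pid': 42}
        # washes into argd so that the loop above selects action = 'confirm',
        # which action_functions below routes to confirm_repeal_reset().
        # The '100:1234,5678' bibrefrec format ("table:refid,recid") is an
        # assumption for illustration.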
def add_external_id():
'''
            associates the external id ext_id with the person with id pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system'] is not None:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id'] is not None:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def set_uid():
'''
            associates the user with id uid to the person with id pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def associate_profile():
'''
            associates the current user (by uid) with the person profile with id pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, redirect_pid))
            # if someone has already claimed this profile, redirect to choose_profile with an error message
else:
                param = ''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + argd['search_param']
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
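        # Element format sketch (assumed): each reviewed form value looks like
        # "<pid>||<bibref>", e.g. "42||100:1234"; it is split on "||", turned
        # into "100:1234,<bibrec>" for webapi.is_valid_bibref, and stored as
        # "42||100:1234,<bibrec>" in bibref_check_reviewed_bibrefs.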
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
            empties the session of merge content and redirects to the manage
            profile page that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def claim_to_other_person():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)
def commit_rt_ticket():
if argd['selection'] is not None:
tid = argd['selection'][0]
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
return self._commit_rt_ticket(req, tid, pid)
def confirm_repeal_reset():
if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
pid = argd['pid']
cancel_search_ticket(without_return = True)
else:
return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
#return self._error_page(req, ln, "Fatal: cannot create ticket without a person id! (crr %s)" %repr(argd))
bibrefrecs = argd['selection']
if argd['confirm']:
action = 'assign'
elif argd['repeal']:
action = 'reject'
elif argd['reset']:
action = 'reset'
else:
                return self._error_page(req, ln, "Fatal: action does not exist!")
for bibrefrec in bibrefrecs:
form['jsondata'] = json.dumps({'pid': str(pid),
'action': action,
'bibrefrec': bibrefrec,
'on': 'user'})
t = WebInterfaceAuthorTicketHandling()
t.add_operation(req, form)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
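        # Payload sketch for the ticket handler above (assumed bibrefrec
        # format): form['jsondata'] carries e.g.
        #   {'pid': '42', 'action': 'assign',
        #    'bibrefrec': '100:1234,5678', 'on': 'user'}
        # and WebInterfaceAuthorTicketHandling.add_operation appends one
        # operation per record to the session ticket.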
def delete_external_ids():
'''
deletes association between the user with pid and the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot delete external ids from an unknown person")
if argd['existing_ext_ids'] is not None:
existing_ext_ids = argd['existing_ext_ids']
else:
return self._error_page(req, ln,
"Fatal: you must select at least one external id in order to delete it")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def none_action():
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
def merge():
'''
performs a merge if allowed on the profiles that the user chose
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without a primary profile!")
if argd['selection']:
profiles_to_merge = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without any profiles selected!")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]
is_admin = False
if pinfo['ulevel'] == 'admin':
is_admin = True
# checking if there are restrictions regarding this merge
can_perform_merge, preventing_pid = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)
if not can_perform_merge:
# when redirected back to the merge profiles page display an error message about the currently attempted merge
pinfo['merge_info_message'] = ("failure", "confirm_failure")
session.dirty = True
redirect_url = "%s/author/merge_profiles?primary_profile=%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
if is_admin:
webapi.merge_profiles(primary_pid, pids_to_merge)
# when redirected back to the manage profile page display a message about the currently attempted merge
pinfo['merge_info_message'] = ("success", "confirm_success")
else:
name = ''
if 'user_last_name' in pinfo:
name = pinfo['user_last_name']
if 'user_first_name' in pinfo:
name += pinfo['user_first_name']
email = ''
if 'user_email' in pinfo:
email = pinfo['user_email']
selection_str = "&selection=".join(profiles_to_merge)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str)}
# a message is sent to the admin with info regarding the currently attempted merge
webapi.create_request_message(userinfo, subj='Merge profiles request')
# when redirected back to the manage profile page display a message about the merge
pinfo['merge_info_message'] = ("success", "confirm_operation")
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
#pp = pprint.PrettyPrinter(indent=4)
#session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
            if 'E-mail' in form:
                if not email:
                    email = form['E-mail']
                elif email != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed}
webapi.create_request_message(userinfo)
def set_canonical_name():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if argd['canonical_name'] is not None:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
if webapi.is_valid_canonical_id(cname):
webapi.swap_person_canonical_name(pid, cname, userinfo)
else:
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid), '#tabData'))
action_functions = {'add_external_id': add_external_id,
'set_uid': set_uid,
'add_missing_external_ids': add_missing_external_ids,
'associate_profile': associate_profile,
'bibref_check_submit': bibref_check_submit,
'cancel': cancel,
'cancel_merging': cancel_merging,
'cancel_rt_ticket': cancel_rt_ticket,
'cancel_search_ticket': cancel_search_ticket,
'cancel_stage': cancel_stage,
'checkout': checkout,
'checkout_continue_claiming': checkout_continue_claiming,
'checkout_remove_transaction': checkout_remove_transaction,
'checkout_submit': checkout_submit,
'assign': claim,
'commit_rt_ticket': commit_rt_ticket,
'confirm': confirm_repeal_reset,
'delete_external_ids': delete_external_ids,
'merge': merge,
'reject': claim,
'repeal': confirm_repeal_reset,
'reset': confirm_repeal_reset,
'send_message': send_message,
'set_canonical_name': set_canonical_name,
'to_other_person': claim_to_other_person,
None: none_action}
return action_functions[action]()
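    # Usage sketch for action() (hypothetical values): submitting the claim
    # form with {'assign': 'True', 'selection': ['100:1234,5678'], 'pid': 42}
    # runs claim(), which filters out already-claimed records, appends one
    # operation per remaining bibrefrec to pinfo['ticket'] and redirects to
    # the claim page of person 42.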
def _ticket_open_claim(self, req, bibrefs, ln):
'''
Generate page to let user choose how to proceed
@param req: Apache Request Object
@type req: Apache Request Object
@param bibrefs: list of record IDs to perform an action on
@type bibrefs: list of int
@param ln: language to display the page in
@type ln: string
'''
session = get_session(req)
uid = getUid(req)
uinfo = collect_user_info(req)
pinfo = session["personinfo"]
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
no_access = self._page_access_permission_wall(req)
session.dirty = True
pid = -1
search_enabled = True
if not no_access and uinfo["precached_usepaperclaim"]:
tpid = webapi.get_pid_from_uid(uid)
if tpid > -1:
pid = tpid
last_viewed_pid = False
if (not no_access
and "claimpaper_admin_last_viewed_pid" in pinfo
and pinfo["claimpaper_admin_last_viewed_pid"]):
names = webapi.get_person_names_from_id(pinfo["claimpaper_admin_last_viewed_pid"])
            names = sorted(names, key=lambda k: k[1], reverse=True)
            if names and names[0]:
                last_viewed_pid = [pinfo["claimpaper_admin_last_viewed_pid"], names[0][0]]
if no_access:
search_enabled = False
pinfo["referer"] = uinfo["referer"]
session.dirty = True
body = TEMPLATE.tmpl_open_claim(bibrefs, pid, last_viewed_pid,
search_enabled=search_enabled)
body = TEMPLATE.tmpl_person_detail_layout(body)
title = _('Claim this paper')
metaheaderadd = WebInterfaceBibAuthorIDClaimPages._scripts(kill_browser_cache=True)
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, webapi.get_person_redirect_link(str(pid))))
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
'''
deletes a transaction from an rt ticket
'''
webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
uid = getUid(req)
tid = int(tid)
rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
for action, bibrefrec in rt_ticket['operations']:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
webapi.delete_request_ticket(pid, tid)
redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
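    # rt_ticket['operations'] is assumed to be a list of (action, bibrefrec)
    # pairs, e.g. [('assign', '100:1234,5678'), ('reject', '700:321,9876')];
    # each pair is converted into a regular ticket operation and committed.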
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
'''
        Collects the data of the persons that the search function returned
        @param query: the query string
        @type query: string
        @param shown_element_functions: the functions that tell the template which columns to show and which buttons to print
@type shown_element_functions: dict
@return: html body
@rtype: string
'''
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# this variable shows if we want to use the following data in the search template
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
def search(self, req, form):
'''
Function used for searching a person based on a name with which the
function is queried.
        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request parameters
        @type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
try:
int(cname)
except ValueError:
is_owner = False
else:
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
profile_page.add_profile_menu(menu)
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '10';var gPID = '10'; var gNumOfWorkers= '10'; var gReqTimeout= '10'; var gPageTimeout= '10';",
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body(content)
parameter = None
if query:
            parameter = '?search_param=%s' % query
        webapi.history_log_visit(req, 'search', params=parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def merge_profiles(self, req, form):
'''
        beginning of the process that performs the merge over multiple person profiles
        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request parameters
        @type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'primary_profile': (str, None),
'search_param': (str, ''),
'selection': (list, None),
'verbose': (int, 0)})
ln = argd['ln']
primary_cname = argd['primary_profile']
search_param = argd['search_param']
selection = argd['selection']
debug = 'verbose' in argd and argd['verbose'] > 0
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
profiles_to_merge = pinfo['merge_profiles']
_ = gettext_set_language(ln)
if not primary_cname:
return page_not_authorized(req, text=_('This page is not accessible directly.'))
no_access = self._page_access_permission_wall(req)
if no_access:
return no_access
if selection is not None:
profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
for profile in selection:
if profile not in profiles_to_merge_session:
pid = webapi.get_person_id_from_canonical_id(profile)
is_available = webapi.is_profile_available(pid)
pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
session.dirty = True
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
is_available = webapi.is_profile_available(primary_pid)
body = ''
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = 'Merge Profiles'
menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
merge_page = WebProfilePage("merge_profile", title, no_cache=True)
merge_page.add_profile_menu(menu)
if debug:
merge_page.add_debug_info(pinfo)
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
shown_element_functions['pass_status'] = 'True'
merge_page.add_bootstrapped_data(json.dumps({
"other": "var gMergeProfile = %s; var gMergeList = %s;" % ([primary_cname, '1' if is_available else '0'], profiles_to_merge)
}))
body += self.search_box(search_param, shown_element_functions)
body = merge_page.get_wrapped_body(body)
return page(title=title,
metaheaderadd=merge_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _perform_search(self, search_param):
'''
calls the search function on the search_param and returns the results
@param search_param: query string
@type search_param: String
        @return: list of pids that the search found to match the search query
        @rtype: list
'''
        pid_candidates_list = []
nquery = None
if search_param:
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
for result in sorted_results:
            pid_candidates_list.append(result[0])
        return pid_candidates_list
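    # Query sketch: a parameter such as "surname:Ellis" is split on ":" and
    # the right-hand side is used; a plain "Ellis" is used verbatim.
    # webapi.search_person_ids_by_name is assumed to return rows whose first
    # column is the person id, e.g. [(42, ...), (43, ...)] -> [42, 43].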
def merge_profiles_ajax(self, req, form):
'''
        Function for handling Ajax requests that add/remove profiles to/from
        the list of profiles to merge, which is saved in the session.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
        # Warn if the simplejson module isn't available
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'addProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
person_id = webapi.get_person_id_from_canonical_id(profile)
if person_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
profile_availability = webapi.is_profile_available(person_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
if profile not in [el[0] for el in profiles_to_merge]:
profiles_to_merge.append([profile, profile_availability])
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'addedPofile': profile})
json_response.update({'addedPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'removeProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
if webapi.get_person_id_from_canonical_id(profile) != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'removedProfile': profile})
else:
                            json_response.update({'result': 'Error: Profile was already missing from the list'})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'setPrimaryProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
profile_id = webapi.get_person_id_from_canonical_id(profile)
if profile_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profile_availability = webapi.is_profile_available(profile_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
primary_profile = session["personinfo"]["merge_primary_profile"]
if primary_profile not in profiles_to_merge:
profiles_to_merge.append(primary_profile)
session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
session.dirty = True
json_response.update({'resultCode': 1})
json_response.update({'primaryProfile': profile})
json_response.update({'primaryPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
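    # Ajax payload sketch (hypothetical canonical id):
    #   {'requestType': 'addProfile', 'profile': 'J.R.Ellis.1'}
    # responds with {'resultCode': 1, 'addedPofile': 'J.R.Ellis.1',
    # 'addedPofileAvailability': '1'}. The misspelled 'addedPofile' keys are
    # kept as-is because the JavaScript client is assumed to rely on them.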
def search_box_ajax(self, req, form):
'''
Function used for handling Ajax requests used in the search box.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
        # Warn if the simplejson module isn't available
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
        # Recent papers request
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'getPapers':
if json_data.has_key('personId'):
pId = json_data['personId']
papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
key=itemgetter(0))
papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
json_response.update({'result': "\n".join(papers_html)})
json_response.update({'totalPapers': len(papers)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Missing person id'})
elif req_type == 'getNames':
if json_data.has_key('personId'):
pId = json_data['personId']
names = webapi.get_person_names_from_id(int(pId))
names_html = TEMPLATE.tmpl_gen_names(names)
json_response.update({'result': "\n".join(names_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'getIDs':
if json_data.has_key('personId'):
pId = json_data['personId']
ids = webapi.get_external_ids_from_person_id(int(pId))
ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
json_response.update({'result': "\n".join(ids_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'isProfileClaimed':
if json_data.has_key('personId'):
pId = json_data['personId']
isClaimed = webapi.get_uid_from_personid(pId)
if isClaimed != -1:
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
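    # Ajax payload sketch: {'requestType': 'getPapers', 'personId': '42'}
    # responds with {'resultCode': 1, 'pid': '42', 'totalPapers': <N>,
    # 'result': '<rendered paper rows>'} as assembled above.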
def choose_profile(self, req, form):
'''
Generate SSO landing/choose_profile page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'search_param': (str, None),
'failed': (str, None),
'verbose': (int, 0)})
ln = argd['ln']
debug = "verbose" in argd and argd["verbose"] > 0
req.argd = argd # needed for perform_req_search
search_param = argd['search_param']
webapi.session_bareinit(req)
session = get_session(req)
uid = getUid(req)
pinfo = session['personinfo']
failed = True
if not argd['failed']:
failed = False
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
if 'arXiv' not in login_info['logged_in_to_remote_systems']:
return page_not_authorized(req, text=_("This page is not accessible directly."))
pid = webapi.get_user_pid(login_info['uid'])
# Create Wrapper Page Markup
is_owner = False
menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
choose_page.add_profile_menu(menu)
if debug:
choose_page.add_debug_info(pinfo)
content = TEMPLATE.tmpl_choose_profile(failed)
body = choose_page.get_wrapped_body(content)
        # In any case, when we pass through here, an autoclaim should be performed right after!
pinfo = session["personinfo"]
pinfo['should_check_to_autoclaim'] = True
session.dirty = True
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
# if already logged in then redirect the user to the page he was viewing
if pid != -1:
redirect_pid = pid
if last_visited_pid:
redirect_pid = last_visited_pid
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, str(redirect_pid)))
else:
# get name strings and email addresses from SSO/Oauth logins: {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla', 'pants_size':20}}
remote_login_systems_info = webapi.get_remote_login_systems_info(req, login_info['logged_in_to_remote_systems'])
# get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
# this is the profile with the biggest intersection of papers so it's more probable that this is the profile the user seeks
probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)
# if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
# # try to assign the user to the profile he chose. If for some reason the profile is not available we assign him to an empty profile
# redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
# if profile_claimed:
# redirect_to_url(req, '%s/author/claim/action?associate_profile=True&redirect_pid=%s' % (CFG_SITE_URL, str(redirect_pid)))
probable_profile_suggestion_info = None
last_viewed_profile_suggestion_info = None
if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
                # get information about the last viewed profile and show it to the user
last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
if probable_pid > -1 and webapi.is_profile_available(probable_pid):
# get information about the most probable profile and show it to the user
probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids )
if not search_param:
                # we prefill the search with the most relevant of the names we get from the external systems
name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
search_param = most_relevant_name(name_variants)
body = body + TEMPLATE.tmpl_probable_profile_suggestion(probable_profile_suggestion_info, last_viewed_profile_suggestion_info, search_param)
shown_element_functions = dict()
shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
            # show the status column in the templates (whether the profile is bound to a user or not)
            shown_element_functions['show_status'] = True
            # pass the status data to the templates even when the column is not
            # shown; the data may be needed elsewhere (e.g. merge_profiles)
            shown_element_functions['pass_status'] = True
# show search results to the user
body = body + self.search_box(search_param, shown_element_functions)
body = body + TEMPLATE.tmpl_choose_profile_footer()
title = _(' ')
return page(title=title,
metaheaderadd=choose_page.get_head().encode('utf-8'),
body=body,
req=req,
language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
'''
        Process and collect data for the arXiv box
        @param req: Apache request object
        @type req: Apache request object
        @param login_info: login status in the following format: {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems': ['Arxiv', ...]}
        @type login_info: dict
        @param person_id: person id of the current page's profile
        @type person_id: int
        @param user_pid: person id of the user
        @type user_pid: int
        @return: data required to build the arXiv box
@rtype: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
arxiv_data = dict()
        # True if the user is not a guest and is connected through arXiv
        arxiv_data['login'] = login_info['logged_in']
        arxiv_data['user_pid'] = user_pid
        arxiv_data['user_has_pid'] = user_pid != -1
        # True if the profile the user is logged in to is the same as the
        # profile of the page the user is viewing
        arxiv_data['view_own_profile'] = user_pid == person_id
return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
'''
        Process and collect data for the ORCID box
@param arxiv_logged_in: shows if the user is logged in through arXiv or not
@type arxiv_logged_in: boolean
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
        @return: data required to build the ORCID box
@rtype: dict
'''
orcid_data = dict()
orcid_data['arxiv_login'] = arxiv_logged_in
orcid_data['orcids'] = None
orcid_data['add_power'] = False
orcid_data['own_profile'] = False
orcid_data['pid'] = person_id
        # if the profile the user is logged in to is the same as the profile
        # of the page the user is viewing
if person_id == user_pid:
orcid_data['own_profile'] = True
# if the user is an admin then he can add an existing orcid to the profile
if ulevel == "admin":
orcid_data['add_power'] = True
orcids = webapi.get_orcids_by_pid(person_id)
if orcids:
orcid_data['orcids'] = orcids
return orcid_data
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
'''
        Process and collect data for the autoclaim box
@param req: Apache request object
@type req: Apache request object
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param remote_logged_in_systems: the remote logged in systems
@type remote_logged_in_systems: list
        @return: data required to build the autoclaim box
@rtype: dict
'''
autoclaim_data = dict()
        # if no autoclaim should occur, or it has already occurred and its
        # results are being shown, the box should remain hidden
autoclaim_data['hidden'] = True
autoclaim_data['person_id'] = person_id
        # if the profile the user is logged in to is the same as the profile
        # of the page the user is viewing
if person_id == user_pid:
recids_to_autoclaim = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
autoclaim_data['hidden'] = False
autoclaim_data['num_of_claims'] = len(recids_to_autoclaim)
return autoclaim_data
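    # Resulting shape (sketch): {'hidden': False, 'person_id': 42,
    # 'num_of_claims': 17} when the viewer owns the profile, otherwise
    # {'hidden': True, 'person_id': 42}.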
############################################
# New autoclaim functions #
############################################
def generate_autoclaim_data(self, req, form):
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
pid = int(json_data['personId'])
except:
            raise NotImplementedError("Some error with the parameters of the Ajax request occurred.")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
        # If autoclaim has already run and no new remote systems exist from
        # which to autoclaim new papers, send the cached result
if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
autoclaim_data = pinfo['autoclaim']['res']
json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
return json.dumps(json_response)
external_pubs_association = pinfo['autoclaim']['external_pubs_association']
autoclaim_ticket = pinfo['autoclaim']['ticket']
ulevel = pinfo['ulevel']
uid = getUid(req)
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_status = webapi.get_login_info(uid, params)
remote_systems = login_status['logged_in_to_remote_systems']
papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems, params, external_pubs_association))
        claimed_recids = set(rec for _, _, rec in get_claimed_papers_of_author(pid))
        already_claimed_recids = claimed_recids & papers_to_autoclaim
        papers_to_autoclaim = papers_to_autoclaim - claimed_recids
for paper in papers_to_autoclaim:
operation_parts = {'pid': pid,
'action': 'assign',
'bibrefrec': str(paper)}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
# In case the operation could not be created (because of an
# erroneous bibrefrec) ignore it and continue with the rest
continue
webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)
additional_info = {'first_name': '', 'last_name': '', 'email': '',
'comments': 'Assigned automatically when autoclaim was triggered.'}
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)
autoclaim_data = dict()
autoclaim_data['hidden'] = False
autoclaim_data['person_id'] = pid
autoclaim_data['successfull_recids'] = set([op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids
webapi.clean_ticket(autoclaim_ticket)
autoclaim_data['unsuccessfull_recids'] = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]
autoclaim_data['num_of_unsuccessfull_recids'] = len(autoclaim_data['unsuccessfull_recids'])
autoclaim_data['recids_to_external_ids'] = dict()
for key, value in external_pubs_association.iteritems():
ext_system, ext_id = key
rec = value
title = get_title_of_paper(rec)
autoclaim_data['recids_to_external_ids'][rec] = title
# cache the result in the session
pinfo['autoclaim']['res'] = autoclaim_data
if pinfo['orcid']['import_pubs']:
pinfo['orcid']['import_pubs'] = False
session.dirty = True
json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
req.write(json.dumps(json_response))
@staticmethod
def get_params_to_check_login_info(session):
def get_params_to_check_login_info_of_arxiv(session):
try:
return session['user_info']
except KeyError:
return None
def get_params_to_check_login_info_of_orcid(session):
pinfo = session['personinfo']
try:
pinfo['orcid']['has_orcid_id'] = bool(get_orcid_id_of_author(pinfo['pid'])[0][0] and pinfo['orcid']['import_pubs'])
except:
pinfo['orcid']['has_orcid_id'] = False
session.dirty = True
return pinfo['orcid']
get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
'orcid': get_params_to_check_login_info_of_orcid}
params = dict()
for system, get_params in get_params_for_remote_system.iteritems():
params[system] = get_params(session)
return params
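# Illustrative sketch (not executed here): the returned dict maps each remote
# system to the session data that webapi.get_login_info expects, e.g.
#
#   params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
#   # params == {'arXiv': <session user_info dict or None>,
#   #            'orcid': <pinfo['orcid'] dict>}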
@staticmethod
def _claim_paper_box(person_id):
'''
Process and collect data for the claim paper box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the claim paper box
@rtype: dict
'''
claim_paper_data = dict()
claim_paper_data['canonical_id'] = str(webapi.get_canonical_id_from_person_id(person_id))
return claim_paper_data
@staticmethod
def _support_box():
'''
Process and collect data for the support box
@return: data required to build the support box
@rtype: dict
'''
support_data = dict()
return support_data
@staticmethod
def _merge_box(person_id):
'''
Process and collect data for the merge box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the merge box
@rtype: dict
'''
merge_data = dict()
search_param = webapi.get_canonical_id_from_person_id(person_id)
name_variants = [element[0] for element in webapi.get_person_names_from_id(person_id)]
relevant_name = most_relevant_name(name_variants)
if relevant_name:
search_param = relevant_name.split(",")[0]
merge_data['search_param'] = search_param
merge_data['canonical_id'] = webapi.get_canonical_id_from_person_id(person_id)
return merge_data
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the internal_ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user level of the user
@type ulevel: str
@return: data required to build the internal_ids box
@rtype: dict
'''
internal_ids_data = dict()
internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
internal_ids_data['person_id'] = person_id
internal_ids_data['user_pid'] = user_pid
internal_ids_data['ulevel'] = ulevel
return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the external_ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user level of the user
@type ulevel: str
@return: data required to build the external_ids box
@rtype: dict
'''
external_ids_data = dict()
external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
external_ids_data['person_id'] = person_id
external_ids_data['user_pid'] = user_pid
external_ids_data['ulevel'] = ulevel
return external_ids_data
@staticmethod
def _hepnames_box(person_id):
return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
'''
Generate the page listing open RT tickets (admin only)
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
webapi.session_bareinit(req)
no_access = self._page_access_permission_wall(req, req_level='admin')
if no_access:
return no_access
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
title = "Open RT tickets"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
tickets = webapi.get_persons_with_open_tickets_list()
tickets = [[webapi.get_most_frequent_name_from_pid(int(t[0])),
webapi.get_person_redirect_link(t[0]), t[0], t[1]]
for t in tickets]
content = TEMPLATE.tmpl_tickets_admin(tickets)
content = TEMPLATE.tmpl_person_detail_layout(content)
body = profile_page.get_wrapped_body(content)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def help(self, req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "help", ln, is_owner, self._is_admin(pinfo))
title = "Help page"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
content = TEMPLATE.tmpl_help_page()
body = profile_page.get_wrapped_body(content)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def export(self, req, form):
'''
Generate JSONized export of Person data
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'request': (str, None),
'userid': (str, None)})
if not CFG_JSON_AVAILABLE:
return "500_json_not_found__install_package"
# session = get_session(req)
request = None
userid = None
if "userid" in argd and argd['userid']:
userid = argd['userid']
else:
return "404_user_not_found"
if "request" in argd and argd['request']:
request = argd["request"]
# find user from ID
user_email = get_email_from_username(userid)
if user_email == userid:
return "404_user_not_found"
uid = get_uid_from_email(user_email)
uinfo = collect_user_info(uid)
# find person by uid
pid = webapi.get_pid_from_uid(uid)
# find papers by pid that are confirmed by a human
papers = webapi.get_papers_by_person_id(pid, 2)
# filter by request param, e.g. arxiv
if not request:
return "404__no_filter_selected"
if request not in VALID_EXPORT_FILTERS:
return "500_filter_invalid"
if request == "arxiv":
query = "(recid:"
query += " OR recid:".join(papers)
query += ") AND 037:arxiv"
db_docs = perform_request_search(p=query, rg=0)
nickmail = ""
nickname = ""
db_arxiv_ids = []
try:
nickname = uinfo["nickname"]
except KeyError:
pass
if not nickname:
try:
nickmail = uinfo["email"]
except KeyError:
nickmail = user_email
nickname = nickmail
db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
construct = {"nickname": nickname,
"claims": ";".join(db_arxiv_ids)}
jsondmp = json.dumps(construct)
signature = webapi.sign_assertion("arXiv", jsondmp)
construct["digest"] = signature
return json.dumps(construct)
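# Illustrative response shape for /author/claim/export?userid=...&request=arxiv
# (values are made up; "digest" is the signed assertion over the JSON payload):
#
#   {"nickname": "jdoe", "claims": "1102.0001;1203.0002", "digest": "<signature>"}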
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
_exports = ['',
'import_orcid_pubs',
'connect_author_with_hepname',
'connect_author_with_hepname_ajax',
'suggest_orcid',
'suggest_orcid_ajax']
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if component not in self._exports:
return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1 # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
return
self.original_identifier = identifier
# check if it's a canonical id: e.g. "J.R.Ellis.1"
try:
pid = int(identifier)
except ValueError:
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def __call__(self, req, form):
'''
Generate SSO landing/author management page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
pinfo['claim_in_process'] = True
argd = wash_urlargd(form, {
'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE or self.person_id is None:
return page_not_authorized(req, text=_("This page is not accessible directly."))
if person_id < 0:
return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))
# log the visit
webapi.history_log_visit(req, 'manage_profile', pid=person_id)
# store the arxiv papers the user owns
if uid > 0 and not pinfo['arxiv_status']:
uinfo = collect_user_info(req)
arxiv_papers = list()
if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
arxiv_papers = uinfo['external_arxivids'].split(';')
if arxiv_papers:
webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
pinfo['arxiv_status'] = True
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
title_message = _('Profile management')
ssl_param = 0
if req.is_https():
ssl_param = 1
# Create Wrapper Page Markup
cname = webapi.get_canonical_id_from_person_id(self.person_id)
if cname == self.person_id:
return page_not_authorized(req, text=_("This page is not accessible directly."))
menu = WebProfileMenu(cname, "manage_profile", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("manage_profile", webapi.get_longest_name_from_pid(self.person_id), no_cache=True)
profile_page.add_profile_menu(menu)
gboxstatus = self.person_id
gpid = self.person_id
gNumOfWorkers = 3 # TODO: read this from a config file
gReqTimeout = 3000
gPageTimeout = 12000
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
user_pid = webapi.get_user_pid(login_info['uid'])
person_data = webapi.get_person_info_by_pid(person_id)
# process and collect data for every box [LEGACY]
arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
support_data = WebInterfaceBibAuthorIDClaimPages._support_box()
ext_ids_data = None
int_ids_data = None
if ulevel == 'admin':
ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)
content = ''
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
content += TEMPLATE.tmpl_profile_management(ln, person_data, arxiv_data,
orcid_data, claim_paper_data,
int_ids_data, ext_ids_data,
autoclaim_data, support_data,
merge_data, hepnames_data)
body = profile_page.get_wrapped_body(content)
return page(title=title_message,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def import_orcid_pubs(self, req, form):
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
orcid_info = pinfo['orcid']
# the author should already have an ORCID iD if this method was triggered
orcid_id = get_orcid_id_of_author(pinfo['pid'])[0][0]
orcid_dois = get_dois_from_orcid(orcid_id)
# TODO: what to do in case some ORCID server error occurs?
if orcid_dois is None:
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
# TODO: it would be smarter to:
# 1. save the orcid_dois in the db
# 2. expire only the external pubs box in the profile page
webauthorapi.expire_all_cache_for_personid(pinfo['pid'])
orcid_info['imported_pubs'] = orcid_dois
orcid_info['import_pubs'] = True
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
def connect_author_with_hepname(self, req, form):
argd = wash_urlargd(form, {'cname':(str, None),
'hepname': (str, None),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['cname'] is not None:
cname = argd['cname']
else:
return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
if argd['hepname'] is not None:
hepname = argd['hepname']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")
webapi.connect_author_with_hepname(cname, hepname)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, cname))
def connect_author_with_hepname_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
cname = json_data['cname']
hepname = json_data['hepname']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
session = get_session(req)
pinfo = session['personinfo']
if not self._is_admin(pinfo):
webapi.connect_author_with_hepname(cname, hepname)
else:
uid = getUid(req)
add_cname_to_hepname_record(cname, hepname, uid)
def suggest_orcid(self, req, form):
argd = wash_urlargd(form, {'orcid':(str, None),
'pid': (int, -1),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")
if argd['orcid'] is not None and is_valid_orcid(argd['orcid']):
orcid = argd['orcid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCiD.")
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, pid))
def suggest_orcid_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
orcid = json_data['orcid']
pid = json_data['pid']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
if not is_valid_orcid(orcid):
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
def _fail(self, req, code):
req.status = code
return
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
index = __call__
class WebInterfaceAuthorTicketHandling(WebInterfaceDirectory):
_exports = ['get_status',
'update_status',
'add_operation',
'modify_operation',
'remove_operation',
'commit',
'abort']
@staticmethod
def bootstrap_status(pinfo, on_ticket):
'''
Function used for generating get_status json bootstrapping.
@param pinfo: person info from the session
@type pinfo: dict
@param on_ticket: ticket target
@type on_ticket: str
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
author_ticketing = WebInterfaceAuthorTicketHandling()
ticket = author_ticketing._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return "{}"
ticket_status = webapi.get_ticket_status(ticket)
return json.dumps(ticket_status)
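# Illustrative usage (as done in the manage_profile page handler): embed the
# current ticket status into the bootstrapped Backbone data, e.g.
#
#   WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user")
#   # -> JSON-encoded list of the ticket's operations, or '{}' if no such
#   #    ticket exists in the session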
def get_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket_status = webapi.get_ticket_status(ticket)
session.dirty = True
req.content_type = 'application/json'
req.write(json.dumps(ticket_status))
def update_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.update_ticket_status(ticket)
session.dirty = True
def add_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
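# Illustrative Ajax payload for add_operation (field names as parsed above;
# 'on' selects the ticket via _get_according_ticket: 'user' or 'autoclaim';
# the pid and bibrefrec values are made up):
#
#   jsondata = {"pid": 1332, "action": "assign",
#               "bibrefrec": "100:5522,1431", "on": "user"}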
def modify_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_modified = webapi.construct_operation(operation_parts, pinfo, uid, should_have_bibref=False)
if operation_to_be_modified is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_modified = webapi.modify_operation_from_ticket(operation_to_be_modified, ticket)
if not operation_is_modified:
# Operation couldn't be modified because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def remove_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_removed = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_removed is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_removed = webapi.remove_operation_from_ticket(operation_to_be_removed, ticket)
if not operation_is_removed:
# Operation couldn't be removed because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def commit(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
additional_info = {'first_name': json_data.get('first_name',"Default"),
'last_name': json_data.get('last_name',"Default"),
'email': json_data.get('email',"Default"),
'comments': json_data['comments']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
uid = getUid(req)
user_is_guest = isGuestUser(uid)
if not user_is_guest:
try:
additional_info['first_name'] = session['user_info']['external_firstname']
additional_info['last_name'] = session['user_info']['external_familyname']
additional_info['email'] = session['user_info']['email']
except KeyError:
additional_info['first_name'] = additional_info['last_name'] = additional_info['email'] = str(uid)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
# When a guest is claiming, we should not commit unless they
# provide their full personal information
strict_check = user_is_guest
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=strict_check)
if userinfo is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.commit_operations_from_ticket(ticket, userinfo, uid, ulevel)
session.dirty = True
def abort(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if not form.has_key('jsondata'):
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
# When a user aborts the claiming procedure, their ticket
# should be deleted completely
delete_ticket = (on_ticket == 'user')
webapi.abort_ticket(ticket, delete_ticket=delete_ticket)
session.dirty = True
def _get_according_ticket(self, on_ticket, pinfo):
ticket = None
if on_ticket == 'user':
ticket = pinfo['ticket']
elif on_ticket == 'autoclaim':
ticket = pinfo['autoclaim']['ticket']
return ticket
def _fail(self, req, code):
req.status = code
return
class WebAuthorSearch(WebInterfaceDirectory):
"""
Provides an interface to profile search using AJAX queries.
"""
_exports = ['list',
'details']
# This class requires JSON libraries
assert CFG_JSON_AVAILABLE, "[WebAuthorSearch] JSON must be enabled."
class QueryPerson(WebInterfaceDirectory):
_exports = ['']
MIN_QUERY_LENGTH = 2
QUERY_REGEX = re.compile(r"[\w\s\.\-,@]+$", re.UNICODE)
def __init__(self, query=None):
self.query = query
def _lookup(self, component, path):
if component not in self._exports:
return WebAuthorSearch.QueryPerson(component), path
def __call__(self, req, form):
if self.query is None or len(self.query) < self.MIN_QUERY_LENGTH:
req.status = apache.HTTP_BAD_REQUEST
return "Query too short"
if not self.QUERY_REGEX.match(self.query):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
pid_results = [{"pid": pid[0]} for pid in webapi.search_person_ids_by_name(self.query)]
req.content_type = 'application/json'
return json.dumps(pid_results)
# Request for index handled by __call__
index = __call__
def _JSON_received(self, form):
try:
return "jsondata" in form
except TypeError:
return False
def _extract_JSON(self, form):
try:
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
return json_data
except ValueError:
return None
def _get_pid_details(self, pid):
details = webapi.get_person_info_by_pid(pid)
details.update({
"names": [{"name": x, "paperCount": y} for x, y in webapi.get_person_names_from_id(pid)],
"externalIds": [{x: y} for x, y in webapi.get_external_ids_from_person_id(pid).items()]
})
details['cname'] = details.pop("canonical_name", None)
return details
def details(self, req, form):
if self._JSON_received(form):
try:
json_data = self._extract_JSON(form)
pids = json_data['pids']
req.content_type = 'application/json'
details = [self._get_pid_details(pid) for pid in pids]
return json.dumps(details)
except (TypeError, KeyError):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
else:
req.status = apache.HTTP_BAD_REQUEST
return "Incorrect query format."
list = QueryPerson()
class WebInterfaceAuthor(WebInterfaceDirectory):
'''
Handles /author/* pages.
Supplies the methods:
/author/choose_profile
/author/claim/
/author/help
/author/manage_profile
/author/merge_profiles
/author/profile/
/author/search
/author/ticket/
'''
_exports = ['',
'choose_profile',
'claim',
'help',
'manage_profile',
'merge_profiles',
'profile',
'search',
'search_ajax',
'ticket']
from invenio.webauthorprofile_webinterface import WebAuthorPages
claim = WebInterfaceBibAuthorIDClaimPages()
profile = WebAuthorPages()
choose_profile = claim.choose_profile
help = claim.help
manage_profile = WebInterfaceBibAuthorIDManageProfilePages()
merge_profiles = claim.merge_profiles
search = claim.search
search_ajax = WebAuthorSearch()
ticket = WebInterfaceAuthorTicketHandling()
def _lookup(self, component, path):
if component not in self._exports:
return WebInterfaceAuthor(component), path
def __init__(self, component=None):
self.path = component
def __call__(self, req, form):
if self.path is None or len(self.path) < 1:
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
# Check if canonical id: e.g. "J.R.Ellis.1"
pid = get_person_id_from_canonical_id(self.path)
if pid >= 0:
url = "%s/author/profile/%s" % (CFG_BASE_URL, get_person_redirect_link(pid))
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
else:
try:
pid = int(self.path)
except ValueError:
redirect_to_url(req, "%s/author/search?q=%s" % (CFG_BASE_URL, self.path))
return
else:
if author_has_papers(pid):
cid = get_person_redirect_link(pid)
if is_valid_canonical_id(cid):
redirect_id = cid
else:
redirect_id = pid
url = "%s/author/profile/%s" % (CFG_BASE_URL, redirect_id)
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
return
index = __call__
class WebInterfacePerson(WebInterfaceDirectory):
'''
Handles /person/* pages.
Supplies the methods:
/person/welcome
'''
_exports = ['welcome','update', 'you']
def welcome(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def you(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def update(self, req, form):
"""
Generate hepnames update form
"""
argd = wash_urlargd(form,
{'ln': (str, CFG_SITE_LANG),
'email': (str, ''),
'IRN': (str, ''),
})
# Retrieve info for HEP name based on email or IRN
recids = []
if argd['email']:
recids = perform_request_search(p="371__m:%s" % argd['email'], cc="HepNames")
elif argd['IRN']:
recids = perform_request_search(p="001:%s" % argd['IRN'], cc="HepNames")
else:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
if not recids:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
else:
hepname_bibrec = get_bibrecord(recids[0])
# Extract all info from recid that should be included in the form
full_name = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="a")
display_name = record_get_field_value(hepname_bibrec, tag="880", ind1="", ind2="", code="a")
email = record_get_field_value(hepname_bibrec, tag="371", ind1="", ind2="", code="m")
status = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="g")
keynumber = record_get_field_value(hepname_bibrec, tag="970", ind1="", ind2="", code="a")
try:
keynumber = keynumber.split('-')[1]
except IndexError:
pass
research_field_list = record_get_field_values(hepname_bibrec, tag="650", ind1="1", ind2="7", code="a")
institution_list = []
for instance in record_get_field_instances(hepname_bibrec, tag="371", ind1="", ind2=""):
if not instance or field_get_subfield_values(instance, "m"):
continue
institution_info = ["", "", "", "", ""]
if field_get_subfield_values(instance, "a"):
institution_info[0] = field_get_subfield_values(instance, "a")[0]
if field_get_subfield_values(instance, "r"):
institution_info[1] = field_get_subfield_values(instance, "r")[0]
if field_get_subfield_values(instance, "s"):
institution_info[2] = field_get_subfield_values(instance, "s")[0]
if field_get_subfield_values(instance, "t"):
institution_info[3] = field_get_subfield_values(instance, "t")[0]
if field_get_subfield_values(instance, "z"):
institution_info[4] = field_get_subfield_values(instance, "z")[0]
institution_list.append(institution_info)
phd_advisor_list = record_get_field_values(hepname_bibrec, tag="701", ind1="", ind2="", code="a")
experiment_list = record_get_field_values(hepname_bibrec, tag="693", ind1="", ind2="", code="e")
web_page = record_get_field_value(hepname_bibrec, tag="856", ind1="1", ind2="", code="u")
# Create form and pass as parameters all the content from the record
body = TEMPLATE.tmpl_update_hep_name(full_name, display_name, email,
status, research_field_list,
institution_list, phd_advisor_list,
experiment_list, web_page)
title = "HEPNames"
return page(title=title,
metaheaderadd = TEMPLATE.tmpl_update_hep_name_headers(),
body=body,
req=req,
)
# pylint: enable=C0301
# pylint: enable=W0613
| jmartinm/invenio | modules/bibauthorid/lib/bibauthorid_webinterface.py | Python | gpl-2.0 | 148,619 | ["VisIt"] | 08bc1451d7faacddbd5ccb84fbcad19ce19585f333314938385d24fd45a9605b |
'''
Graph construction utilities for DeRegNet: parse edge tables and SIF files
into igraph graphs, and provide ready-made interaction networks
(Reactome FI, KEGG, OmniPath, Pathway Commons, RegNetwork).
'''
import os
import zipfile
import requests
import pandas as pd
import igraph as ig
from biomap import BioMap
################################################################################
# data path #
################################################################################
DEREGNET_GRAPH_DATA = os.path.expanduser('~/.deregnet/graphs')
if not os.path.isdir(DEREGNET_GRAPH_DATA):
os.makedirs(DEREGNET_GRAPH_DATA)
################################################################################
# functionality for parsing edge tables to graphs #
################################################################################
def table_to_graph(table,
source_column,
target_column,
directed=True,
attributes=None,
make_edges_unique=True,
exclude=None,
include=None,
graph_attributes=None,
**kwargs):
return ig.Graph(**table_to_igraph_init_kwargs(table,
source_column,
target_column,
directed,
attributes,
make_edges_unique,
exclude,
include,
graph_attributes,
**kwargs))
#
def table_to_igraph_init_kwargs(table,
source_column,
target_column,
directed=True,
attributes=None,
make_edges_unique=True,
exclude=None,
include=None,
graph_attributes=None,
**kwargs):
def handle_entrez_like_ids(ID):
# coerce Entrez-like numeric ids (e.g. 672.0 or '672.0') to plain strings
try:
return str(int(float(ID)))
except (ValueError, TypeError):
return ID
#
attributes = {} if attributes is None else attributes
graph_attributes = {} if graph_attributes is None else graph_attributes
#
#
if isinstance(table, pd.DataFrame):
data = table
else:
data = pd.read_table(table, **kwargs)
# drop NA's
data.dropna(inplace=True)
# filter
if exclude:
for fltr in exclude:
data = data.loc[~(data[fltr['attr']].isin(fltr['values']))]
elif include:
for fltr in include:
data = data.loc[data[fltr['attr']].isin(fltr['values'])]
#
# return value
kwargs = {}
kwargs['directed'] = directed
kwargs['graph_attrs'] = graph_attributes
# nodes
nodes = set(data[source_column]) | set(data[target_column])
nodes = [handle_entrez_like_ids(node) for node in nodes]
nodes = [node for node in nodes if node.strip()]
node_index = {node: index for index, node in enumerate(nodes)}  # O(n) instead of repeated list.index
kwargs['n'] = len(nodes)
kwargs['vertex_attrs'] = {'name': nodes}
# edges
edge_attrs = {}
data['edges'] = list(zip(data[source_column].tolist(), data[target_column].tolist()))
data['edges'] = [(handle_entrez_like_ids(edge[0]), handle_entrez_like_ids(edge[1]))
for edge in data['edges']]
if make_edges_unique:
for attr in attributes:
edge_attrs[attr] = data.groupby('edges')[attr].apply(list).to_dict()
data.drop_duplicates('edges', inplace=True)
else:
for attr in attributes:
edge_attrs[attr] = data[attr].tolist()
edges = data['edges'].tolist()
edge_attrs = {
attributes[attr]: [list(set(edge_attrs[attr][edge])) for edge in edges]
if make_edges_unique else edge_attrs[attr]
for attr in attributes
}
edges = [
(node_index[edge[0]],
node_index[edge[1]])
for edge in edges
]
kwargs['edges'] = edges
kwargs['edge_attrs'] = edge_attrs
return kwargs
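# Illustrative usage sketch for table_to_graph / table_to_igraph_init_kwargs
# (small in-memory table; column names are made up):
#
#   import pandas as pd
#   df = pd.DataFrame({'src': ['a', 'a'], 'tgt': ['b', 'c'],
#                      'type': ['activates', 'inhibits']})
#   g = table_to_graph(df, source_column='src', target_column='tgt',
#                      attributes={'type': 'interactions'})
#   # g is a directed ig.Graph with vertex attribute 'name' (order may vary)
#   # and a list-valued edge attribute 'interactions' (make_edges_unique=True)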
def read_sif(sif,
directed=True,
make_edges_unique=True,
exclude=None,
include=None,
**kwargs):
'''
SIF (Simple Interaction Format) Reader.
http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats
WARNING: This function does not support the "a pp x,y,z" notation for
multiple edges in one line. Instead, a line like the one above will
be interpreted as one edge between a node "a" and a node "x,y,z".
Args:
sif (str): Path of the SIF file
directed (bool): Whether to interpret the graph as directed
Returns:
ig.Graph: Graph encoded in the SIF file (hopefully ;)
'''
return ig.Graph(**sif_to_igraph_init_kwargs(sif,
directed,
make_edges_unique,
exclude,
include,
**kwargs))
def sif_to_igraph_init_kwargs(sif,
directed=True,
make_edges_unique=True,
exclude=None,
include=None,
**kwargs):
'''
'''
exclude = [{'attr': 1, 'values': exclude}] if exclude is not None else None
include = [{'attr': 1, 'values': include}] if include is not None else None
return table_to_igraph_init_kwargs(table=sif,
source_column=0,
target_column=2,
directed=directed,
attributes={1:'interactions' if make_edges_unique else 'interaction'},
make_edges_unique=make_edges_unique,
exclude=exclude,
include=include,
header=None,
**kwargs)
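# Illustrative usage sketch (file name made up): a SIF line "a<TAB>pp<TAB>b"
# becomes a directed edge a -> b whose middle column ends up in the
# 'interactions' (or 'interaction') edge attribute.
#
#   g = read_sif('network.sif', directed=True)
#   print(g.summary())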
################################################################################
# DeregnetGraph #
################################################################################
class DeregnetGraph(ig.Graph):
def __init__(self,
make_edges_unique=True,
**igraph_init_kwargs):
'''
'''
self.list_valued_interaction_type_attr = make_edges_unique
super().__init__(**igraph_init_kwargs)
@property
def undirected_edge_types(self):
raise NotImplementedError
@property
def edge_type_attribute(self):
return 'interactions' if self.list_valued_interaction_type_attr else 'interaction'
@classmethod
def _download(cls, url, local_file, verbose):
if verbose:
print('Downloading %s ...' % url)
response = requests.get(url)
if response.status_code != 200:
print('Download of %s FAILED.' % url)
return  # do not write the failed response body to disk
with open(local_file, 'wb') as fp:
fp.write(response.content)
def download(self, *args, **kwargs):
raise NotImplementedError
def map_nodes(self, mapper, FROM=None, TO=None, source_attr='name', target_attr='name'):
self.vs[target_attr] = mapper.map(self.vs[source_attr], FROM, TO)
def map_nodes_to_multiple_targets(self, mapper, FROM=None, TO=(None,), source_attr='name', target_attrs=('name',)):
for target_id_type, target_attr in zip(TO, target_attrs):
self.map_nodes(mapper, FROM, target_id_type, source_attr, target_attr)
def map_nodes_from_dict(self, dct, source_attr='name', target_attr='name', default=None):
self.vs[target_attr] = [dct.get(v[source_attr], default) for v in self.vs]
def change_name_attr(self, new_name_attr, old_name_attr='_name'):
self.vs[old_name_attr] = list(self.vs['name'])
self.vs['name'] = list(self.vs[new_name_attr])
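# Illustrative usage sketch of the node-mapping helpers (attribute and ID
# system names are examples, consistent with their use further below):
#
#   hgnc = BioMap().get_mapper('hgnc')
#   graph.map_nodes(hgnc, TO='entrez_id',
#                   source_attr='name', target_attr='entrez')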
@property
def interaction_types(self):
if self.list_valued_interaction_type_attr:
return { interaction_type for edge in self.es
for interaction_type in edge[self.edge_type_attribute] }
else:
return { edge[self.edge_type_attribute] for edge in self.es }
def expand_nodes(self, node_attr, keep=None):
'''
Expand each node whose `node_attr` holds a list of identifiers into one
node per identifier; nodes without such a mapping are dropped unless they
match the optional (key, value) `keep` filter.
'''
node_index = 0
names = []
oldidx2index = {}
index2targets = {}
index2sources = {}
for node in self.vs:
if node[node_attr] is None:
# no expansion ids for this node: drop it unless it matches the keep filter
if keep is None:
continue
key, value = keep
if not node[key] == value:
continue
names.append(node['name'])
oldidx2index[node.index] = node_index
node_index += 1
else:
for attr_val in node[node_attr]:
names.append(attr_val)
oldidx2index[node.index] = node_index
node_index += 1
num_nodes = node_index
edge2attrs = {(oldidx2index.get(edge.source, None), oldidx2index.get(edge.target, None)): edge.attributes() for edge in self.es}
edge2attrs = { edge: edge2attrs[edge] for edge in edge2attrs if edge[0] is not None and edge[1] is not None }
edges = list(edge2attrs.keys())
edge_attrs = {}
for attr in self.es.attribute_names():
edge_attrs[attr] = [edge2attrs[edge][attr] for edge in edges]
return DeregnetGraph(self.list_valued_interaction_type_attr,
**{'n': num_nodes,
'edges': edges,
'vertex_attrs': {'name': names},
'edge_attrs': edge_attrs,
'directed': self.is_directed()})
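# Illustrative usage sketch: with a list-valued vertex attribute such as
# 'mgi_id' (several mouse ids mapped from one human gene), expand_nodes
# yields a new DeregnetGraph with one vertex per id, as used by the
# map_to_mouse methods below:
#
#   mouse = graph.expand_nodes('mgi_id', keep=('symbol', None))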
def neighborhood_graph(self, nodes, mode=ig.ALL, depth=1, node_attr='symbol'):
# TODO filter
if isinstance(nodes, str):
nodes = [nodes]
neighborhood = set(self.vs.select(**{node_attr+'_in':nodes}))
for d in range(depth):
for node in list(neighborhood):
neighbors = set(self.vs.select(self.neighbors(node, mode=mode)))
neighborhood = neighborhood | neighbors
return self.subgraph(neighborhood, 'create_from_scratch')
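# Illustrative usage sketch (gene symbol made up): induced subgraph on the
# depth-2 neighborhood of TP53.
#
#   sub = graph.neighborhood_graph('TP53', depth=2, node_attr='symbol')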
def direct_undirected_edges(self, is_directed):
# TODO: not implemented yet; calls (e.g. from ReactomeFI) are currently no-ops
pass
################################################################################
# Reactome FI
################################################################################
class ReactomeFI(DeregnetGraph):
REACTOME_FI_DOWNLOAD_URL = 'http://reactomews.oicr.on.ca:8080/caBigR3WebApp2016'
FILENAME = 'FIsInGene_022717_with_annotations.txt.zip'
def __init__(self, exclude=None, include=None, direct_undirected=False, verbose=True, local_root=None):
self.verbose = verbose
self.download_root = self.REACTOME_FI_DOWNLOAD_URL
self.local_root = os.path.join(DEREGNET_GRAPH_DATA, 'reactome_fi') if local_root is None else local_root
if not os.path.isdir(self.local_root):
os.makedirs(self.local_root)
if not os.path.isfile(self.filepath):
self.download()
# only directed edges
exclude = [{'attr': 'Direction', 'values': ['-']}] if exclude is None else exclude
igraph_init_kwargs = table_to_igraph_init_kwargs(table=self.filepath,
source_column='Gene1',
target_column='Gene2',
directed=True,
attributes={
'Annotation': 'interaction',
'Direction': 'direction',
'Score': 'score'
},
make_edges_unique=False,
exclude=exclude,
include=include,
compression='zip')
super().__init__(make_edges_unique=False,
**igraph_init_kwargs)
if direct_undirected:
self.direct_undirected_edges(lambda e: e['direction'] == '-')
self.es['interactions'] = [
[interaction.strip() for interaction in edge['interaction'].split(';')]
for edge in self.es
]
self.list_valued_interaction_type_attr = True
self.vs['symbol'] = self.vs['name']
self.map_nodes_to_multiple_targets(BioMap().get_mapper('hgnc'),
TO=['entrez', 'ensembl', 'uniprot_ids', 'mgi_id'],
target_attrs=['entrez', 'ensembl', 'uniprot_ids', 'mgi_id'])
def map_to_mouse(self):
return self.expand_nodes('mgi_id')
@property
def filepath(self):
return os.path.join(self.local_root, self.FILENAME)
def download(self):
url = self.download_root+'/'+self.FILENAME
self._download(url, self.filepath, self.verbose)
@property
def undirected_edge_types(self):
return {edge['interaction'] for edge in self.es if edge['direction'] == '-'}
################################################################################
# KEGG #
################################################################################
class KEGG(DeregnetGraph):
KEGG_GRAPH_PATH = os.path.join(DEREGNET_GRAPH_DATA, 'kegg')
def __init__(self,
species='hsa',
exclude=None,
include=None,
make_edges_unique=True,
directed=True):
if not os.path.isdir(self.KEGG_GRAPH_PATH):
os.makedirs(self.KEGG_GRAPH_PATH)
local_file = os.path.join(self.KEGG_GRAPH_PATH, 'kegg_'+species+'.sif')
# TODO: implement download
igraph_init_kwargs = sif_to_igraph_init_kwargs(local_file,
directed=directed,
make_edges_unique=make_edges_unique,
exclude=exclude,
include=include)
super().__init__(make_edges_unique, **igraph_init_kwargs)
self.vs['name'] = [ID.split(':')[-1] for ID in self.vs['name']]
if species == 'hsa':
self.map_nodes_to_multiple_targets(BioMap().get_mapper('hgnc'),
FROM='entrez',
TO=['entrez',
'ensembl',
'symbol',
'uniprot_ids'],
target_attrs=['entrez',
'ensembl',
'symbol',
'uniprot_ids'])
elif species == 'mmu':
self.map_nodes_to_multiple_targets(BioMap().get_mapper('mgi_entrez'),
FROM='entrez',
TO=['entrez', 'symbol', 'name'],
target_attrs=['entrez', 'symbol', 'name'])
self.map_nodes(BioMap().get_mapper('mgi_ensembl'),
FROM='symbol', TO='ensembl',
source_attr='symbol', target_attr='ensembl')
@classmethod
def undirected_edge_types(cls):
return {'binding/association',
'dissociation',
'missing interaction',
'NA'}
@classmethod
def download(cls, species='hsa'):
pass
@classmethod
def get(cls, species='hsa'):
pass
################################################################################
# Omnipath
################################################################################
class OmniPath(DeregnetGraph):
# TODO: implement download here
def __init__(self, path=None):
if path is None:
self.path = DEREGNET_GRAPH_DATA
else:
self.path = path
def __call__(self):
return ig.Graph.Read_GraphML(os.path.join(self.path, 'omnipath/omnipath_directed_interactions.graphml'))
def ptm_graph(self):
return ig.Graph.Read_GraphML(os.path.join(self.path, 'omnipath/omnipath_ptm_graph.graphml'))
################################################################################
# Pathway Commons
################################################################################
PATHWAY_COMMONS_DOWNLOAD_ROOT='http://www.pathwaycommons.org/archives/PC2'
class PathwayCommons(DeregnetGraph):
def __init__(self,
what='All',
exclude=[],
include=[],
make_edges_unique=True,
download_root=PATHWAY_COMMONS_DOWNLOAD_ROOT,
version=9,
verbose=True):
self.root = download_root
self.version = version
self.verbose = verbose
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
filename = self._file_name(what)
filepath = os.path.join(self.local_path, filename)
if not os.path.isfile(filepath):
self.download(what)
igraph_init_kwargs = sif_to_igraph_init_kwargs(sif=filepath,
directed=True,
make_edges_unique=make_edges_unique,
exclude=exclude,
include=include,
compression='gzip')
super().__init__(make_edges_unique, **igraph_init_kwargs)
hgnc = BioMap().get_mapper('hgnc')
self.vs['symbol'] = hgnc.map(self.vs['name'], TO='symbol')
self.map_nodes_to_multiple_targets(hgnc,
TO=['entrez', 'ensembl', 'uniprot_ids', 'mgi_id'],
target_attrs=['entrez', 'ensembl', 'uniprot_ids', 'mgi_id'])
def map_to_mouse(self):
mouse_graph = self.expand_nodes('mgi_id', keep=('symbol', None))
mgi_ensembl = BioMap().get_mapper('mgi_ensembl')
mouse_graph.map_nodes_to_multiple_targets(mgi_ensembl, TO=['symbol', 'ensembl_id'], target_attrs=['symbol', 'ensembl'])
mgi_entrez = BioMap().get_mapper('mgi_entrez')
mouse_graph.map_nodes(mgi_entrez, TO='entrez_id', target_attr='entrez')
return mouse_graph
def download(self, what):
filename = self._file_name(what)
url = self._download_url(filename)
filepath = os.path.join(self.local_path, filename)
self._download(url, filepath, self.verbose)
@property
def local_path(self):
return os.path.join(DEREGNET_GRAPH_DATA, 'pathway_commons')
def _download_url(self, filename):
return self.root+'/v'+str(self.version)+'/'+filename
def _file_name(self, what):
return 'PathwayCommons'+str(self.version)+'.'+what+'.hgnc.sif.gz'
@property
def available_data_sources(self):
return {
'wp': '',
'smpdb': '',
'reconx': '',
'reactome': '',
'psp': '',
'pid': '',
'panther': '',
'netpath': '',
'msigdb': '',
'kegg': '',
'intact': '',
'intact_complex': '',
'inoh': '',
'humancyc': '',
'hprd': '',
'drugbank': '',
'dip': '',
'ctd': '',
'corum': '',
'biogrid': '',
'bind': '',
'All': '',
'Detailed': ''
}
def download_all(self):
for data_source in self.available_data_sources:
self.download(data_source)
@classmethod
def undirected_edge_types(cls):
return { 'reacts-with',
'interacts-with',
'in-complex-with' }
################################################################################
# RegNetwork #
################################################################################
DEFAULT_REG_NETWORK_DOWNLOAD_URL='http://www.regnetworkweb.org/download'
DEFAULT_REG_NETWORK_LOCAL_PATH=os.path.join(DEREGNET_GRAPH_DATA, 'regnetwork')
if not os.path.isdir(DEFAULT_REG_NETWORK_LOCAL_PATH):
os.makedirs(DEFAULT_REG_NETWORK_LOCAL_PATH)
class RegNetwork(DeregnetGraph):
'''
Gene-regulatory graphs defined for Human and Mouse available at:
_______________________________________
| |
| http://www.regnetworkweb.org/home.jsp |
|_______________________________________|
If you use these graphs in your work, please cite (see also RegNetwork.cite):
-------------------------------------------------------------------------------------------
Liu et al.: RegNetwork: an integrated database of transcriptional and post-transcriptional
regulatory networks in human and mouse. Database, 2015, 1-12, doi:10.1093/database/bav095
-------------------------------------------------------------------------------------------
Abstract:
Transcriptional and post-transcriptional regulation of gene expression is of fundamental
importance to numerous biological processes. Nowadays, an increasing amount of gene
regulatory relationships have been documented in various databases and literature. However,
to more efficiently exploit such knowledge for biomedical research and applications, it is
necessary to construct a genome-wide regulatory network database to integrate the
information on gene regulatory relationships that are widely scattered in many different
places. Therefore, in this work, we build a knowledge-based database, named ‘RegNetwork’,
of gene regulatory networks for human and mouse by collecting and integrating the
documented regulatory interactions among transcription factors (TFs), microRNAs (miRNAs)
and target genes from 25 selected databases. Moreover, we also inferred and incorporated
potential regulatory relationships based on transcription factor binding site (TFBS) motifs
into RegNetwork. As a result, RegNetwork contains a comprehensive set of experimentally
observed or predicted transcriptional and post-transcriptional regulatory relationships, and
the database framework is flexibly designed for potential extensions to include gene
regulatory networks for other organisms in the future. Based on RegNetwork, we characterized
the statistical and topological properties of genome-wide regulatory networks for human and
mouse, we also extracted and interpreted simple yet important network motifs that involve
the interplays between TF-miRNA and their targets. In summary, RegNetwork provides an
integrated resource on the prior information for gene regulatory relationships, and it
enables us to further investigate context-specific transcriptional and post-transcriptional
regulatory interactions based on domain-specific experimental data.
'''
def __init__(self,
species='hsa',
directions=True,
sources=False,
exclude=None,
include=None,
make_edges_unique=False,
root_url=None,
local_path=None,
verbose=True,
node_columns=(1,3),
annotate=True,
databases_as_list=True):
'''
Args:
species (str): Species for which you want the RegNetwork graph for. Available
are 'hsa' (human) and 'mmu' (mouse).
Default: 'hsa'
directions (bool): Whether to include the direction of the edges where known.
Default: True
sources (bool): ...
exclude (list): List of edge types to exclude.
Default: None
include (list): List of edge types to include.
Default: None
make_edges_unique (bool): If True edges with identical incident nodes will result
in only one edge and the attributes will be accessible
as a list. If False, the graph can contain multiedges.
For the DeRegNet algorithms any is fine, it will mostly
be a matter of downstream convenience whether you choose
one or the other.
Default: False
root_url (str): Base URL from where you can access the RegNetwork data for download
Default: 'http://www.regnetworkweb.org/download'
local_path (str): Path where to store downloaded files.
Default: '${HOME}/.deregnet/graphs/regnetwork'
verbose (bool): If True you get some additional messages during download, etc.
node_columns (tuple): DO NOT USE THIS ARGUMENT; there really should be no reason to change it.
annotate (bool): If True the graph will have additional attributes, like several
ID systems identifiers as node attribute, etc.
Default: True
databases_as_list (bool): If True the database origin attribute of the edges will be
in list format. Otherwise it will be a comma-separated string.
Default: True
'''
root_url = DEFAULT_REG_NETWORK_DOWNLOAD_URL if root_url is None else root_url
local_path = DEFAULT_REG_NETWORK_LOCAL_PATH if local_path is None else local_path
data = self.get(species, directions, sources, local_path, root_url=root_url, verbose=verbose)
attributes = {}
if sources and directions:
attributes[4] = 'direction' if not make_edges_unique else 'directions'
elif not sources:
attributes[4] = 'databases'
attributes[5] = 'evidence' if not make_edges_unique else 'evidences'
attributes[6] = 'confidence' if not make_edges_unique else 'confidences'
source_column, target_column = node_columns
igraph_init_kwargs = table_to_igraph_init_kwargs(data,
source_column=source_column,
target_column=target_column,
attributes=attributes,
directed=True,
exclude=exclude,
include=include,
make_edges_unique=make_edges_unique)
super().__init__(make_edges_unique, **igraph_init_kwargs)
self.species = species
self.directions = directions
if annotate and node_columns == (1,3):
self.annotate(databases_as_list)
def annotate(self, databases_as_list):
self.vs['mirna'] = [v['name'].startswith('MI') for v in self.vs]
self.vs['protein_coding'] = [not v for v in self.vs['mirna']]
tfs = {e.source for e in self.es}
self.vs['tf'] = [(v.index in tfs) if v['protein_coding'] else False for v in self.vs]
self.vs['node_type'] = ['gene' if v['protein_coding'] else 'mirna' for v in self.vs]
self.vs['node_type'] = [self.vs['node_type'][i] if not v['tf'] else 'tf' for i, v in enumerate(self.vs)]
self.map_nodes(BioMap().get_mapper('mirbase'), TO='alias', target_attr='mirna_alias')
# TODO: map MiRBase families
if self.species == 'hsa':
self.map_nodes_to_multiple_targets(BioMap().get_mapper('hgnc'),
FROM='entrez_id',
TO=['entrez_id', 'ensembl_id', 'symbol'],
target_attrs=['entrez', 'ensembl', 'symbol'])
else:
self.map_nodes_to_multiple_targets(BioMap().get_mapper('mgi_entrez'),
FROM='entrez',
TO=['entrez', 'symbol', 'name'],
target_attrs=['entrez', 'symbol', 'name'])
self.map_nodes(BioMap().get_mapper('mgi_ensembl'),
FROM='symbol', TO='ensembl',
source_attr='symbol', target_attr='ensembl')
self.vs['name'] = [','.join(v['mirna_alias']).split(',')[0].split(self.species+'-')[-1] if v['mirna'] else v['symbol'] for v in self.vs]
# edges
if databases_as_list:
self.es['databases'] = [s.split(',') for s in self.es['databases']]
if self.directions:
g = RegNetwork(species=self.species, directions=True, sources=True, annotate=False)
edges = {(g.vs[e.source]['name'], g.vs[e.target]['name']): e['direction'] for e in g.es}
self.es['direction'] = ['-/-' if (self.vs[e.source]['name'], self.vs[e.target]['name']) not in edges
else edges[(self.vs[e.source]['name'], self.vs[e.target]['name'])]
for e in self.es]
self.es['direction'] = ['--|' if self.vs[e.source]['mirna'] else e['direction'] for e in self.es]
self.annotate_with_edge_types()
def annotate_with_edge_types(self):
def get_edge_type(self, edge):
source = self.vs[edge.source]
target = self.vs[edge.target]
if source['mirna']:
if target['tf']:
edge_type = 'mirna-tf'
elif target['mirna']:
edge_type = 'mirna-mirna'
else:
edge_type = 'mirna-gene'
elif source['tf']:
if target['tf']:
edge_type = 'tf-tf'
elif target['mirna']:
edge_type = 'tf-mirna'
else:
edge_type = 'tf-gene'
return edge_type
self.es['edge_type'] = [get_edge_type(self, edge) for edge in self.es]
@classmethod
def download(cls, what='RegulatoryDirections', verbose=True, root_url=None, local_path=None):
root_url = DEFAULT_REG_NETWORK_DOWNLOAD_URL if root_url is None else root_url
if what == 'RegulatoryDirections':
filename = what+'.rar'
else:
filename = what+'.zip'
url = root_url+'/'+filename
local_path = DEFAULT_REG_NETWORK_LOCAL_PATH if local_path is None else local_path
local_file = os.path.join(local_path, filename)
cls._download(url, local_file, verbose)
@classmethod
def get(cls, species='hsa', directed=True, sources=False, local_path=None, **kwargs):
        # Lazy import: rarfile is only needed when reading the .rar archives
        # that hold the directed-network data.
        import rarfile
species = 'human' if species == 'hsa' else 'mouse'
what = species if not directed else 'RegulatoryDirections'
local_path = DEFAULT_REG_NETWORK_LOCAL_PATH if local_path is None else local_path
if directed:
local_file = os.path.join(local_path, what+'.rar')
else:
local_file = os.path.join(local_path, species+'.zip')
if not sources:
return cls.get_data_from_form(species, local_path, skiprows=1, header=None)
if not os.path.isfile(local_file):
cls.download(what, local_path=local_path, **kwargs)
if directed:
filename = 'kegg.'+species+'.reg.direction'
with rarfile.RarFile(local_file) as rf:
with rf.open(filename) as fp:
                    return pd.read_table(fp, sep=r'\s+', skiprows=1, header=None)
else:
filename = species+'.source'
with zipfile.ZipFile(local_file) as zf:
with zf.open(filename) as fp:
return pd.read_table(fp, header=None, low_memory=False)
@classmethod
def get_data_from_form(cls, species='hsa', local_path=None, **kwargs):
local_path = DEFAULT_REG_NETWORK_LOCAL_PATH if local_path is None else local_path
local_file = cls.download_via_form(species, local_path)
return pd.read_csv(local_file, low_memory=False, **kwargs)
@classmethod
def download_via_form(cls, species='hsa', local_path=None):
        def response_status(response, response_name):
            # Signal failure with True so callers can bail out early.
            if response.status_code != 200:
                print('ERROR during %s request!' % response_name)
                return True
            return False
local_path = DEFAULT_REG_NETWORK_LOCAL_PATH if local_path is None else local_path
species = 'human' if species in {'hsa', 'human'} else 'mouse'
local_file = os.path.join(local_path, species+'.csv')
if os.path.isfile(local_file):
return local_file
if species == 'mouse':
search_response = requests.get(cls.search_request('mmu'))
if response_status(search_response, 'search'): return None
export_response = requests.get(cls.export_request('mmu'))
            if response_status(export_response, 'export'): return None
with open(local_file, 'wb') as fp:
fp.write(export_response.content)
else:
search_response = requests.get(cls.search_request('hsa', 'Experimental'))
if response_status(search_response, 'search'): return None
export_response = requests.get(cls.export_request('hsa'))
            if response_status(export_response, 'export'): return None
local_file_experimental = os.path.join(local_path, 'human_experimental.csv')
with open(local_file_experimental, 'wb') as fp:
fp.write(export_response.content)
search_response = requests.get(cls.search_request('hsa', 'Predicted'))
if response_status(search_response, 'search'): return None
export_response = requests.get(cls.export_request('hsa'))
            if response_status(export_response, 'export'): return None
local_file_predicted = os.path.join(local_path, 'human_predicted.csv')
with open(local_file_predicted, 'wb') as fp:
fp.write(export_response.content)
experimental = pd.read_csv(local_file_experimental, low_memory=False)
predicted = pd.read_csv(local_file_predicted, low_memory=False)
human = pd.concat([experimental, predicted])
human.to_csv(local_file, index=False)
return local_file
@classmethod
def search_request(cls, species='hsa', evidence='all'):
url = 'http://www.regnetworkweb.org/search.jsp?'
url += 'searchItem=&searchType=all&'
url += 'organism='+('human&' if species == 'hsa' else 'mouse&')
url += 'database=all&evidence='+evidence+'&confidence=all&'
url += 'resultsPerPage=30&prevValidPN=1&orderBy=RegSymbol_Asc&pageNumber=1'
return url
@classmethod
def export_request(cls, species='hsa'):
url = 'http://www.regnetworkweb.org/export.jsp?format=csv&'
url += 'sql=SELECT+*+FROM+'+('human' if species == 'hsa' else 'mouse')+'+WHERE'
url += '+%271%27+ORDER+BY+UPPER%28regulator_symbol%29+ASC'
return url
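
# Usage sketch (an illustration, not part of the original module): assuming
# this file is importable as deregnet.graphs, the annotated human network
# could be built along these lines:
#
#     from deregnet.graphs import RegNetwork
#     graph = RegNetwork(species='hsa', directions=True, annotate=True)
#     print(set(graph.vs['node_type']))  # node types assigned by annotate()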
|
sebwink/deregnet
|
python/deregnet/graphs.py
|
Python
|
bsd-3-clause
| 37,195
|
[
"Cytoscape"
] |
306309795de6acc71872b0eb14f40a2860c1b83457994f2dddb014875ee54d75
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyGriddataformats(PythonPackage):
"""The gridDataFormats package provides classes to unify reading
and writing n-dimensional datasets. One can read grid data from
files, make them available as a Grid object, and write out the
data again."""
homepage = "http://www.mdanalysis.org/GridDataFormats"
url = "https://pypi.io/packages/source/G/GridDataFormats/GridDataFormats-0.3.3.tar.gz"
version('0.3.3', '5c83d3bdd421eebcee10111942c5a21f')
depends_on('python@2.7:')
depends_on('py-setuptools', type='build')
depends_on('py-numpy@1.0.3:', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/py-griddataformats/package.py
|
Python
|
lgpl-2.1
| 1,915
|
[
"MDAnalysis"
] |
e7695db0bb0c683f789ff72a3ce2f6e194d28889f450ead63df0241a6604fb25
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ibis.util as util
import ibis.expr.types as ir
import ibis.expr.operations as ops
class FormatMemo(object):
# A little sanity hack to simplify the below
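    # It assigns each distinct (by formatted repr) table op an alias like
    # 'ref_0', 'ref_1', ... so repeated tables are printed once and then
    # referenced by alias.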
def __init__(self):
from collections import defaultdict
self.formatted = {}
self.aliases = {}
self.ops = {}
self.counts = defaultdict(lambda: 0)
self._repr_memo = {}
def __contains__(self, obj):
return self._key(obj) in self.formatted
def _key(self, obj):
memo_key = id(obj)
if memo_key in self._repr_memo:
return self._repr_memo[memo_key]
result = self._format(obj)
self._repr_memo[memo_key] = result
return result
def _format(self, obj):
return obj._repr(memo=self._repr_memo)
def observe(self, obj, formatter=lambda x: x._repr()):
key = self._key(obj)
if key not in self.formatted:
self.aliases[key] = 'ref_%d' % len(self.formatted)
self.formatted[key] = formatter(obj)
self.ops[key] = obj
self.counts[key] += 1
def count(self, obj):
return self.counts[self._key(obj)]
def get_alias(self, obj):
return self.aliases[self._key(obj)]
def get_formatted(self, obj):
return self.formatted[self._key(obj)]
class ExprFormatter(object):
"""
For creating a nice tree-like representation of an expression graph for
displaying in the console.
TODO: detect reused DAG nodes and do not display redundant information
"""
def __init__(self, expr, indent_size=2, base_level=0, memo=None,
memoize=True):
self.expr = expr
self.indent_size = indent_size
self.base_level = base_level
self.memoize = memoize
# For tracking "extracted" objects, like tables, that we don't want to
# print out more than once, and simply alias in the expression tree
self.memo = memo or FormatMemo()
def get_result(self):
what = self.expr.op()
if self.memoize:
self._memoize_tables()
if isinstance(what, ops.TableNode) and what.has_schema():
# This should also catch aggregations
if not self.memoize and what in self.memo:
text = 'Table: %s' % self.memo.get_alias(what)
elif isinstance(what, ops.PhysicalTable):
text = self._format_table(what)
else:
# Any other node type
text = self._format_node(what)
elif isinstance(what, ops.TableColumn):
text = self._format_column(self.expr)
elif isinstance(what, ir.Node):
text = self._format_node(what)
elif isinstance(what, ops.Literal):
text = 'Literal[%s] %s' % (self._get_type_display(),
str(what.value))
if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:
text = '{0} = {1}'.format(self.expr.get_name(), text)
if self.memoize:
alias_to_text = [(self.memo.aliases[x],
self.memo.formatted[x],
self.memo.ops[x])
for x in self.memo.formatted]
alias_to_text.sort()
# A hack to suppress printing out of a ref that is the result of
# the top level expression
refs = [x + '\n' + y
for x, y, op in alias_to_text
if not op.equals(what)]
text = '\n\n'.join(refs + [text])
return self._indent(text, self.base_level)
def _memoize_tables(self):
table_memo_ops = (ops.Aggregation, ops.Selection,
ops.SelfReference)
def walk(expr):
op = expr.op()
def visit(arg):
if isinstance(arg, list):
[visit(x) for x in arg]
elif isinstance(arg, ir.Expr):
walk(arg)
if isinstance(op, ops.PhysicalTable):
self.memo.observe(op, self._format_table)
elif isinstance(op, ir.Node):
visit(op.args)
if isinstance(op, table_memo_ops):
self.memo.observe(op, self._format_node)
elif isinstance(op, ops.TableNode) and op.has_schema():
self.memo.observe(op, self._format_table)
walk(self.expr)
def _indent(self, text, indents=1):
return util.indent(text, self.indent_size * indents)
def _format_table(self, table):
# format the schema
rows = ['name: {0!s}\nschema:'.format(table.name)]
rows.extend([' %s : %s' % tup for tup in
zip(table.schema.names, table.schema.types)])
opname = type(table).__name__
type_display = self._get_type_display(table)
opline = '%s[%s]' % (opname, type_display)
return '{0}\n{1}'.format(opline, self._indent('\n'.join(rows)))
def _format_column(self, expr):
# HACK: if column is pulled from a Filter of another table, this parent
# will not be found in the memo
col = expr.op()
parent_op = col.parent().op()
if parent_op in self.memo:
table_formatted = self.memo.get_alias(parent_op)
else:
table_formatted = '\n' + self._indent(self._format_node(parent_op))
type_display = self._get_type_display(self.expr)
return ("Column[{0}] '{1}' from table {2}"
.format(type_display, col.name, table_formatted))
def _format_node(self, op):
formatted_args = []
def visit(what, extra_indents=0):
if isinstance(what, ir.Expr):
result = self._format_subexpr(what)
else:
result = self._indent(str(what))
if extra_indents > 0:
result = util.indent(result, self.indent_size)
formatted_args.append(result)
arg_names = getattr(op, '_arg_names', None)
if arg_names is None:
for arg in op.args:
if isinstance(arg, list):
for x in arg:
visit(x)
else:
visit(arg)
else:
for arg, name in zip(op.args, arg_names):
if name is not None:
name = self._indent('{0}:'.format(name))
if isinstance(arg, list):
if name is not None and len(arg) > 0:
formatted_args.append(name)
indents = 1
else:
indents = 0
for x in arg:
visit(x, extra_indents=indents)
else:
if name is not None:
formatted_args.append(name)
indents = 1
else:
indents = 0
visit(arg, extra_indents=indents)
opname = type(op).__name__
type_display = self._get_type_display(op)
opline = '%s[%s]' % (opname, type_display)
return '\n'.join([opline] + formatted_args)
def _format_subexpr(self, expr):
formatter = ExprFormatter(expr, base_level=1, memo=self.memo,
memoize=False)
return formatter.get_result()
def _get_type_display(self, expr=None):
if expr is None:
expr = self.expr
if isinstance(expr, ir.Node):
expr = expr.to_expr()
if isinstance(expr, ir.TableExpr):
return 'table'
elif isinstance(expr, ir.ArrayExpr):
return 'array(%s)' % expr.type()
elif isinstance(expr, ir.SortExpr):
return 'array-sort'
elif isinstance(expr, (ir.ScalarExpr, ir.AnalyticExpr)):
return '%s' % expr.type()
elif isinstance(expr, ir.ExprList):
list_args = [self._get_type_display(arg)
for arg in expr.op().args]
return ', '.join(list_args)
else:
raise NotImplementedError
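
# Usage sketch (an illustration, not part of the original module): given an
# ibis table expression `expr`, the tree repr built by this class could be
# printed with:
#
#     print(ExprFormatter(expr).get_result())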
|
mariusvniekerk/ibis
|
ibis/expr/format.py
|
Python
|
apache-2.0
| 8,773
|
[
"VisIt"
] |
c4dd0d0683a58b46b0a6ef99b8125f82ac678fa27e992e8c3d4886796550774a
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
def test_common():
from os.path import join, dirname
from numpy import array, all, abs
from pylada.vasp import Extract
import pylada
from quantities import eV, angstrom
a = Extract(directory=join(dirname(__file__), 'data', 'COMMON'))
assert a.success == True
assert a.algo == "Fast"
assert a.is_dft == True
assert a.is_gw == False
assert abs(a.encut - 245.3 * eV) < 1e-8
assert repr(a.datetime) == 'datetime.datetime(2012, 3, 8, 21, 18, 29)'
assert a.LDAUType is None
assert len(a.HubbardU_NLEP) == 0
assert a.pseudopotential == 'PAW_PBE'
assert set(a.stoichiometry) == {2}
assert set(a.species) == {'Si'}
assert a.isif == 7
assert abs(a.sigma - 0.2 * eV) < 1e-8
assert a.nsw == 50
assert a.ibrion == 2
assert a.relaxation == "volume"
assert a.ispin == 1
assert a.isym == 2
assert a.name == 'has a name'
assert a.system == "has a name"
assert all(abs(array(a.ionic_charges) - [4.0]) < 1e-8)
assert abs(a.nelect - 8.0) < 1e-8
assert abs(a.extraelectron - 0e0) < 1e-8
assert abs(a.nbands - 8) < 1e-8
assert all(abs(array([[0, 2.73612395, 2.73612395], [2.73612395, 0, 2.73612395], [
2.73612395, 2.73612395, 0]]) - a.structure.cell) < 1e-4)
assert all(abs(a.structure.scale - 1.0 * angstrom) < 1e-4)
assert all(abs(a.structure.scale * a.structure.cell - a._grep_structure.cell * angstrom) < 1e-4)
assert all(abs(a.structure[0].pos) < 1e-8)
assert all(abs(a.structure[1].pos - 1.36806) < 1e-6)
assert all([b.type == 'Si' for b in a.structure])
assert abs(a.structure.energy + 10.665642 * eV) < 1e-6
assert abs(a.sigma - 0.2 * eV) < 1e-6
assert abs(a.ismear - 1) < 1e-6
assert abs(a.potim - 0.5) < 1e-6
assert abs(a.istart - 0) < 1e-6
assert abs(a.icharg - 2) < 1e-6
assert a.precision == "accurate"
assert abs(a.ediff - 2e-5) < 1e-8
assert abs(a.ediffg - 2e-4) < 1e-8
assert a.lorbit == 0
assert abs(a.nupdown + 1.0) < 1e-8
assert a.lmaxmix == 4
assert abs(a.valence - 8.0) < 1e-8
assert a.nonscf == False
assert a.lwave == False
assert a.lcharg
assert a.lvtot == False
assert a.nelm == 60
assert a.nelmin == 2
assert a.nelmdl == -5
assert all(abs(a.kpoints - array([[0.25, 0.25, 0.25], [0.75, -0.25, -0.25]])) < 1e-8)
assert all(abs(a.multiplicity - [96.0, 288.0]) < 1e-8)
pylada.verbose_representation = True
|
pylada/pylada-light
|
tests/vasp/extract/test_common.py
|
Python
|
gpl-3.0
| 3,615
|
[
"CRYSTAL",
"VASP"
] |
d4be27a91be0e95deaa93e36448d1bde504419095a19610fd207e912cd0c8037
|
import argparse
import os
import sys
sys.path.insert(0, "../../pyDataView/")
import postproclib as ppl
from misclib import Chdir
class MockHeader(object):
def __init__(self):
self.vmd_skip = 0
self.vmd_start = 0
self.vmd_end = 0
self.Nsteps = 0
self.tplot = 0
self.delta_t = 0
self.initialstep = 0
class MockRaw(object):
def __init__(self, fdir):
self.fdir = fdir
try:
self.header = ppl.MDHeaderData(fdir)
except IOError:
self.header = MockHeader()
class dummyField(ppl.Field):
"""
Dummy field object
"""
dtype = 'd'
nperbin = 1
fname = ''
plotfreq = 1
def __init__(self, fdir):
self.Raw = MockRaw(fdir)
def prepare_vmd_files(args):
"""
Copy tcl files for postprocessing
and reformat vmd temp files
"""
fobj = dummyField(args['fdir'])
vmdobj = ppl.VMDFields(fobj, args['fdir'])
vmdobj.copy_tclfiles() #Create VMD vol_data folder and copy vmd driver scripts
vmdobj.reformat()
def run_vmd(parent_parser=argparse.ArgumentParser(add_help=False)):
def print_fieldlist():
outstr = 'Type of field to overlay with vmd \n'
try:
ppObj = ppl.All_PostProc('../src/results/')
outstr = outstr + str(ppObj)
        except Exception:
            # Fall back to the bare help text if the results directory is
            # missing or unreadable.
            pass
outstr = outstr + '\n N.B. Make sure to include quotes if there is a space in field name \n'
return outstr
#Keyword arguments
parser = argparse.ArgumentParser(description='run_vmd vs. master jay -- Runs VMD with overlayed field',
parents=[parent_parser])
try:
argns, unknown = parser.parse_known_args()
print('Using directory defined as ', argns.fdir)
except AttributeError:
parser.add_argument('-d','--fdir',dest='fdir', nargs='?',
help='Directory with vmd file and field files',
default=None)
parser.add_argument('-f', '--field', dest='field',
help=print_fieldlist(), default=None)
parser.add_argument('-c', '--comp', dest='comp',
help='Component name', default=None)
parser.add_argument('-l', '--clims', dest='clims',
help='Colour limits', default=None)
parser.add_argument('-p', '--poly',help='Polymer flag',
action='store_const', const=True)
parser.add_argument('-m', '--mie',help='Mie types flag',
action='store_const', const=True)
args = vars(parser.parse_args())
#Static arguments
    if args['fdir'] is None:
scriptdir = os.path.dirname(os.path.realpath(__file__))
args['fdir'] = scriptdir + '/../src/results/'
    if args['field'] is None:
        print("No field type specified -- using default value of no field")
component = 0
    if len(sys.argv) < 2 or sys.argv[1] in ['--help', '-help', '-h']:
ppObj = ppl.All_PostProc(args['fdir'])
print("Available field types include")
print(ppObj)
sys.exit()
    if args['comp'] is None:
        print("No components direction specified, setting default = 0")
        args['comp'] = 0
    print(args['clims'], type(args['clims']))
    if args['clims'] is None:
print("No colour limits specified -- using defaults min/max")
clims = None
else:
clims = [float(i) for i in args['clims'].replace("[","").replace("]","").split(",")]
#Polymer case, no field at the moment
if args['poly']:
        if args['field'] is not None:
print("Can't overlay field and polymers")
prepare_vmd_files(args)
#Open vmd
with Chdir(args['fdir']):
#Build vmd polymer file
ppl.build_psf()
ppl.concat_files()
#Call polymer script
command = "vmd -e ./vmd/load_polymer.vmd"
os.system(command)
sys.exit()
if args['mie']:
        if args['field'] is not None:
print("Can't overlay field and mie molecules")
prepare_vmd_files(args)
with Chdir(args['fdir']):
#Call mie script
command = "vmd -e ./vmd/load_miepsf.vmd"
os.system(command)
sys.exit()
#Plane field case
    if args['field'] is None:
prepare_vmd_files(args)
#Open vmd
with Chdir(args['fdir']):
command = "vmd " + "./vmd_out.dcd"
os.system(command)
#Overlayed field case
else:
try:
ppObj = ppl.All_PostProc(args['fdir'])
fobj = ppObj.plotlist[args['field']]
except KeyError:
print("Field not recognised -- available field types include:")
print(ppObj)
sys.exit()
except:
raise
vmdobj = ppl.VMDFields(fobj, args['fdir'])
vmdobj.copy_tclfiles() #Create VMD vol_data folder and copy vmd driver scripts
vmdobj.reformat()
vmdobj.write_vmd_header()
vmdobj.write_vmd_intervals()
vmdobj.write_dx_range(component=args['comp'], clims=clims)
vmdobj.writecolormap('RdYlBu')
#Open vmd
with Chdir(args['fdir'] + './vmd/'):
command = "vmd -e " + "./plot_MD_field.vmd"
os.system(command)
if __name__ == "__main__":
run_vmd()
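# Example invocation (the field name 'rho' is hypothetical; run with --help
# to list the fields actually available in your results directory):
#
#     python run_vmd.py -d ../src/results/ -f rho -c 0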
|
edwardsmith999/pyDataView
|
run_vmd.py
|
Python
|
gpl-3.0
| 5,515
|
[
"VMD"
] |
c5ffc1876cdbb6cf07921904fc536659b839572c3cdc45c6ae3a5ed7b096b33c
|
from __future__ import print_function
import sys
import os
# # YUCK! scinet bonkers python setup
# paths = os.environ['PYTHONPATH']
# sys.path = paths.split(':') + sys.path
# if __name__ == '__main__':
# import matplotlib
# matplotlib.use('Agg')
'''
Forced phot of Megacam images:
python legacyanalysis/euclid.py --queue /global/cscratch1/sd/dstn/euclid/images/megacam/lists/i.SNR10-MIQ.lst > q1
edit q1 ->
python legacyanalysis/euclid.py --survey-dir euclid-rex --forced 4276 --out euclid-rex/forced/megacam-962810-ccd00.fits
'''
import numpy as np
import pylab as plt
from glob import glob
import fitsio
import astropy
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.util import wcs_pv2sip_hdr
from astrometry.util.plotutils import PlotSequence, plothist, loghist
from astrometry.util.ttime import Time, MemMeas
from astrometry.util.multiproc import multiproc
from legacypipe.runbrick import run_brick, rgbkwargs, rgbkwargs_resid
from legacypipe.survey import LegacySurveyData, imsave_jpeg, get_rgb
from legacypipe.image import LegacySurveyImage
from legacypipe.catalog import read_fits_catalog
from tractor.sky import ConstantSky
from tractor.sfd import SFDMap
from tractor import Tractor, NanoMaggies
from tractor.galaxy import disable_galaxy_cache
from tractor.ellipses import EllipseE
# For ACS reductions:
#rgbscales = dict(I=(0, 0.01))
rgbscales = dict(I=(0, 0.003))
rgbkwargs .update(scales=rgbscales)
rgbkwargs_resid.update(scales=rgbscales)
SFDMap.extinctions.update({'DES I': 1.592})
allbands = 'I'
rgbscales_cfht = dict(g = (2, 0.004),
r = (1, 0.006),
i = (0, 0.02),
# DECam
z = (0, 0.025),
)
def make_zeropoints():
base = 'euclid/images/'
C = fits_table()
C.image_filename = []
C.image_hdu = []
C.camera = []
C.expnum = []
C.filter = []
C.exptime = []
C.crpix1 = []
C.crpix2 = []
C.crval1 = []
C.crval2 = []
C.cd1_1 = []
C.cd1_2 = []
C.cd2_1 = []
C.cd2_2 = []
C.width = []
C.height = []
C.ra = []
C.dec = []
C.ccdzpt = []
C.ccdname = []
C.ccdraoff = []
C.ccddecoff = []
C.fwhm = []
C.propid = []
C.mjd_obs = []
fns = glob(base + 'cfhtls/CFHTLS_D25_*_100028p021230_T0007_MEDIAN.fits')
fns.sort()
for fn in fns:
psffn = fn.replace('.fits', '_psfex.psf')
if not os.path.exists(psffn):
print('Missing:', psffn)
sys.exit(-1)
wtfn = fn.replace('.fits', '_weight.fits')
if not os.path.exists(wtfn):
print('Missing:', wtfn)
sys.exit(-1)
dqfn = fn.replace('.fits', '.flg.fits')
if not os.path.exists(dqfn):
print('Missing:', dqfn)
sys.exit(-1)
extname = 'D25'
ralo = 360.
rahi = 0.
declo = 90.
dechi = -90.
for i,fn in enumerate(fns):
F = fitsio.FITS(fn)
print(len(F), 'FITS extensions in', fn)
print(F)
phdr = F[0].read_header()
# FAKE
expnum = 7000 + i+1
filt = phdr['FILTER']
print('Filter', filt)
filt = filt[0]
exptime = phdr['EXPTIME']
psffn = fn.replace('.fits', '_psfex.psf')
PF = fitsio.FITS(psffn)
# Just one big image in the primary HDU.
hdr = phdr
hdu = 0
C.image_filename.append(fn.replace(base,''))
C.image_hdu.append(hdu)
C.camera.append('cfhtls')
C.expnum.append(expnum)
C.filter.append(filt)
C.exptime.append(exptime)
C.ccdzpt.append(hdr['MZP_AB'])
for k in ['CRPIX1','CRPIX2','CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2']:
C.get(k.lower()).append(hdr[k])
W = hdr['NAXIS1']
H = hdr['NAXIS2']
C.width.append(W)
C.height.append(H)
wcs = wcs_pv2sip_hdr(hdr)
rc,dc = wcs.radec_center()
C.ra.append(rc)
C.dec.append(dc)
x,y = np.array([1,1,1,W/2.,W,W,W,W/2.]), np.array([1,H/2.,H,H,H,H/2.,1,1])
rr,dd = wcs.pixelxy2radec(x,y)
ralo = min(ralo, rr.min())
rahi = max(rahi, rr.max())
declo = min(declo, dd.min())
dechi = max(dechi, dd.max())
hdu = 1
print('Reading PSF from', psffn, 'hdu', hdu)
psfhdr = PF[hdu].read_header()
fwhm = psfhdr['PSF_FWHM']
C.ccdname.append(extname)
C.ccdraoff.append(0.)
C.ccddecoff.append(0.)
C.fwhm.append(fwhm)
C.propid.append('0')
C.mjd_obs.append(0.)
C.to_np_arrays()
fn = 'euclid/survey-ccds-cfhtls.fits.gz'
C.writeto(fn)
print('Wrote', fn)
print('RA range', ralo, rahi)
print('Dec range', declo, dechi)
r = np.linspace(ralo, rahi, 21)
d = np.linspace(declo, dechi, 21)
rr,dd = np.meshgrid(r, d)
cols,rows = np.meshgrid(np.arange(len(r)), np.arange(len(d)))
B = fits_table()
B.ra1 = rr[1:,:-1].ravel()
B.ra2 = rr[1:, 1:].ravel()
B.dec1 = dd[:-1,1:].ravel()
B.dec2 = dd[1: ,1:].ravel()
B.ra = (B.ra1 + B.ra2 ) / 2.
B.dec = (B.dec1 + B.dec2) / 2.
B.brickrow = rows[:-1,:-1].ravel()
B.brickcol = cols[:-1,:-1].ravel()
B.brickq = (B.brickrow % 2) * 2 + (B.brickcol % 2)
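    # brickq takes values 0-3 in a 2x2 checkerboard over brick rows/columns,
    # presumably so neighbouring bricks can be processed in separate,
    # non-overlapping passes.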
B.brickid = 1 + np.arange(len(B))
B.brickname = np.array(['%05ip%04i' % (int(100.*r), int(100.*d))
for r,d in zip(B.ra, B.dec)])
B.writeto('euclid/survey-bricks.fits.gz')
return
C = fits_table()
C.image_filename = []
C.image_hdu = []
C.camera = []
C.expnum = []
C.filter = []
C.exptime = []
C.crpix1 = []
C.crpix2 = []
C.crval1 = []
C.crval2 = []
C.cd1_1 = []
C.cd1_2 = []
C.cd2_1 = []
C.cd2_2 = []
C.width = []
C.height = []
C.ra = []
C.dec = []
C.ccdzpt = []
C.ccdname = []
C.ccdraoff = []
C.ccddecoff = []
C.fwhm = []
C.propid = []
C.mjd_obs = []
for iband,band in enumerate(['Y','J','H']):
fn = base + 'vista/UVISTA_%s_DR1_CFHT.fits' % band
hdu = 0
hdr = fitsio.read_header(fn, ext=hdu)
C.image_hdu.append(hdu)
C.image_filename.append(fn.replace(base,''))
C.camera.append('vista')
expnum = iband + 1
C.expnum.append(expnum)
C.filter.append(band)
C.exptime.append(hdr['EXPTIME'])
C.ccdzpt.append(hdr['MAGZEROP'])
for k in ['CRPIX1','CRPIX2','CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2']:
C.get(k.lower()).append(hdr[k])
C.width.append(hdr['NAXIS1'])
C.height.append(hdr['NAXIS2'])
wcs = wcs_pv2sip_hdr(hdr)
rc,dc = wcs.radec_center()
C.ra.append(rc)
C.dec.append(dc)
psffn = fn.replace('.fits', '_psfex.psf')
psfhdr = fitsio.read_header(psffn, ext=1)
fwhm = psfhdr['PSF_FWHM']
C.fwhm.append(fwhm)
C.ccdname.append('0')
C.ccdraoff.append(0.)
C.ccddecoff.append(0.)
C.propid.append('0')
C.mjd_obs.append(0.)
C.to_np_arrays()
fn = 'euclid/survey-ccds-vista.fits.gz'
C.writeto(fn)
print('Wrote', fn)
return
C = fits_table()
C.image_filename = []
C.image_hdu = []
C.camera = []
C.expnum = []
C.filter = []
C.exptime = []
C.crpix1 = []
C.crpix2 = []
C.crval1 = []
C.crval2 = []
C.cd1_1 = []
C.cd1_2 = []
C.cd2_1 = []
C.cd2_2 = []
C.width = []
C.height = []
C.ra = []
C.dec = []
C.ccdzpt = []
C.ccdname = []
C.ccdraoff = []
C.ccddecoff = []
C.fwhm = []
C.propid = []
C.mjd_obs = []
base = 'euclid/images/'
fns = glob(base + 'acs-vis/*_sci.VISRES.fits')
fns.sort()
for fn in fns:
hdu = 0
hdr = fitsio.read_header(fn, ext=hdu)
C.image_hdu.append(hdu)
C.image_filename.append(fn.replace(base,''))
C.camera.append('acs-vis')
words = fn.split('_')
expnum = int(words[-2], 10)
C.expnum.append(expnum)
filt = words[-4]
C.filter.append(filt)
C.exptime.append(hdr['EXPTIME'])
C.ccdzpt.append(hdr['PHOTZPT'])
for k in ['CRPIX1','CRPIX2','CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2']:
C.get(k.lower()).append(hdr[k])
C.width.append(hdr['NAXIS1'])
C.height.append(hdr['NAXIS2'])
wcs = wcs_pv2sip_hdr(hdr)
rc,dc = wcs.radec_center()
C.ra.append(rc)
C.dec.append(dc)
psffn = fn.replace('_sci.VISRES.fits', '_sci.VISRES_psfex.psf')
psfhdr = fitsio.read_header(psffn, ext=1)
fwhm = psfhdr['PSF_FWHM']
C.ccdname.append('0')
C.ccdraoff.append(0.)
C.ccddecoff.append(0.)
#C.fwhm.append(0.18 / 0.1)
C.fwhm.append(fwhm)
C.propid.append('0')
C.mjd_obs.append(0.)
C.to_np_arrays()
fn = 'euclid/survey-ccds-acsvis.fits.gz'
C.writeto(fn)
print('Wrote', fn)
C = fits_table()
C.image_filename = []
C.image_hdu = []
C.camera = []
C.expnum = []
C.filter = []
C.exptime = []
C.crpix1 = []
C.crpix2 = []
C.crval1 = []
C.crval2 = []
C.cd1_1 = []
C.cd1_2 = []
C.cd2_1 = []
C.cd2_2 = []
C.width = []
C.height = []
C.ra = []
C.dec = []
C.ccdzpt = []
C.ccdname = []
C.ccdraoff = []
C.ccddecoff = []
C.fwhm = []
C.propid = []
C.mjd_obs = []
fns = glob(base + 'megacam/*p.fits')
fns.sort()
for fn in fns:
psffn = fn.replace('p.fits', 'p_psfex.psf')
if not os.path.exists(psffn):
print('Missing:', psffn)
sys.exit(-1)
wtfn = fn.replace('p.fits', 'p_weight.fits')
if not os.path.exists(wtfn):
print('Missing:', wtfn)
sys.exit(-1)
dqfn = fn.replace('p.fits', 'p_flag.fits')
if not os.path.exists(dqfn):
print('Missing:', dqfn)
sys.exit(-1)
for fn in fns:
F = fitsio.FITS(fn)
print(len(F), 'FITS extensions in', fn)
print(F)
phdr = F[0].read_header()
expnum = phdr['EXPNUM']
filt = phdr['FILTER']
print('Filter', filt)
filt = filt[0]
exptime = phdr['EXPTIME']
psffn = fn.replace('p.fits', 'p_psfex.psf')
PF = fitsio.FITS(psffn)
for hdu in range(1, len(F)):
print('Reading header', fn, 'hdu', hdu)
#hdr = fitsio.read_header(fn, ext=hdu)
hdr = F[hdu].read_header()
C.image_filename.append(fn.replace(base,''))
C.image_hdu.append(hdu)
C.camera.append('megacam')
C.expnum.append(expnum)
C.filter.append(filt)
C.exptime.append(exptime)
C.ccdzpt.append(hdr['PHOTZPT'])
for k in ['CRPIX1','CRPIX2','CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2']:
C.get(k.lower()).append(hdr[k])
C.width.append(hdr['NAXIS1'])
C.height.append(hdr['NAXIS2'])
wcs = wcs_pv2sip_hdr(hdr)
rc,dc = wcs.radec_center()
C.ra.append(rc)
C.dec.append(dc)
print('Reading PSF from', psffn, 'hdu', hdu)
#psfhdr = fitsio.read_header(psffn, ext=hdu)
psfhdr = PF[hdu].read_header()
fwhm = psfhdr['PSF_FWHM']
C.ccdname.append(hdr['EXTNAME'])
C.ccdraoff.append(0.)
C.ccddecoff.append(0.)
C.fwhm.append(fwhm)
C.propid.append('0')
C.mjd_obs.append(hdr['MJD-OBS'])
C.to_np_arrays()
fn = 'euclid/survey-ccds-megacam.fits.gz'
C.writeto(fn)
print('Wrote', fn)
class AcsVisImage(LegacySurveyImage):
def __init__(self, *args, **kwargs):
super(AcsVisImage, self).__init__(*args, **kwargs)
self.psffn = self.imgfn.replace('_sci.VISRES.fits', '_sci.VISRES_psfex.psf')
assert(self.psffn != self.imgfn)
self.wtfn = self.imgfn.replace('_sci', '_wht')
assert(self.wtfn != self.imgfn)
self.dqfn = self.imgfn.replace('_sci', '_flg')
assert(self.dqfn != self.imgfn)
self.name = 'AcsVisImage: expnum %i' % self.expnum
self.dq_saturation_bits = 0
def get_wcs(self):
# Make sure the PV-to-SIP converter samples enough points for small
# images
stepsize = 0
if min(self.width, self.height) < 600:
            stepsize = min(self.width, self.height) / 10.
hdr = self.read_image_header()
wcs = wcs_pv2sip_hdr(hdr, stepsize=stepsize)
return wcs
def read_invvar(self, clip=True, **kwargs):
'''
Reads the inverse-variance (weight) map image.
'''
#return self._read_fits(self.wtfn, self.hdu, **kwargs)
img = self.read_image(**kwargs)
        # Estimate per-pixel noise via Blanton's 5-pixel MAD
slice1 = (slice(0,-5,10),slice(0,-5,10))
slice2 = (slice(5,None,10),slice(5,None,10))
mad = np.median(np.abs(img[slice1] - img[slice2]).ravel())
sig1 = 1.4826 * mad / np.sqrt(2.)
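        # 1.4826 converts a median absolute deviation to a Gaussian sigma;
        # the sqrt(2) removes the variance doubling that comes from
        # differencing two (assumed independent) pixels.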
print('sig1 estimate:', sig1)
invvar = np.ones_like(img) / sig1**2
return invvar
def read_dq(self, **kwargs):
'''
Reads the Data Quality (DQ) mask image.
'''
print('Reading data quality image', self.dqfn, 'ext', self.hdu)
dq = self._read_fits(self.dqfn, self.hdu, **kwargs)
return dq
def read_sky_model(self, splinesky=False, slc=None, **kwargs):
sky = ConstantSky(0.)
return sky
class MegacamImage(LegacySurveyImage):
def __init__(self, *args, **kwargs):
super(MegacamImage, self).__init__(*args, **kwargs)
#self.psffn = self.imgfn.replace('p.fits', 'p_psfex.psf')
self.psffn = self.imgfn.replace('.fits', '_psfex.psf')
assert(self.psffn != self.imgfn)
#self.wtfn = self.imgfn.replace('p.fits', 'p_weight.fits')
self.wtfn = self.imgfn.replace('.fits', '_weight.fits')
assert(self.wtfn != self.imgfn)
#self.dqfn = self.imgfn.replace('p.fits', 'p_flag.fits')
self.dqfn = self.imgfn.replace('.fits', '_flag.fits')
assert(self.dqfn != self.imgfn)
self.name = 'MegacamImage: %i-%s' % (self.expnum, self.ccdname)
self.dq_saturation_bits = 0
def get_wcs(self):
# Make sure the PV-to-SIP converter samples enough points for small
# images
stepsize = 0
if min(self.width, self.height) < 600:
            stepsize = min(self.width, self.height) / 10.
hdr = self.read_image_header()
wcs = wcs_pv2sip_hdr(hdr, stepsize=stepsize)
return wcs
def read_image(self, header=None, **kwargs):
print('Reading image from', self.imgfn, 'hdu', self.hdu)
R = self._read_fits(self.imgfn, self.hdu, header=header, **kwargs)
if header:
img,header = R
else:
img = R
img = img.astype(np.float32)
if header:
return img,header
return img
def read_invvar(self, clip=True, **kwargs):
'''
Reads the inverse-variance (weight) map image.
'''
#return self._read_fits(self.wtfn, self.hdu, **kwargs)
img = self.read_image(**kwargs)
        # Estimate per-pixel noise via Blanton's 5-pixel MAD
slice1 = (slice(0,-5,10),slice(0,-5,10))
slice2 = (slice(5,None,10),slice(5,None,10))
mad = np.median(np.abs(img[slice1] - img[slice2]).ravel())
sig1 = 1.4826 * mad / np.sqrt(2.)
print('sig1 estimate:', sig1)
invvar = np.ones_like(img) / sig1**2
return invvar
def read_dq(self, **kwargs):
'''
Reads the Data Quality (DQ) mask image.
'''
print('Reading data quality image', self.dqfn, 'ext', self.hdu)
dq = self._read_fits(self.dqfn, self.hdu, **kwargs)
print('Got', dq.dtype, dq.shape)
return dq
def read_sky_model(self, splinesky=False, slc=None, **kwargs):
sky = ConstantSky(0.)
return sky
class VistaImage(MegacamImage):
def __init__(self, *args, **kwargs):
super(VistaImage, self).__init__(*args, **kwargs)
self.name = 'VistaImage: %i-%s %s' % (self.expnum, self.ccdname, self.band)
self.wtfn = self.wtfn.replace('_weight.fits', '.weight.fits')
self.dqfn = self.dqfn.replace('_flag.fits', '.flg.fits')
assert(self.dqfn != self.imgfn)
assert(self.wtfn != self.imgfn)
def read_invvar(self, clip=True, **kwargs):
'''
Reads the inverse-variance (weight) map image.
'''
print('Reading weight map', self.wtfn, 'ext', self.hdu)
iv = self._read_fits(self.wtfn, self.hdu, **kwargs)
print('Read', iv.shape, iv.dtype)
print('Range', iv.min(), iv.max(), 'median', np.median(iv))
return iv
def read_dq(self, **kwargs):
'''
Reads the Data Quality (DQ) mask image.
'''
print('Reading data quality image', self.dqfn, 'ext', self.hdu)
dq = self._read_fits(self.dqfn, self.hdu, **kwargs)
print('Got', dq.dtype, dq.shape)
print('Blanking out bit 1...')
dq -= (dq & 1)
return dq
class CfhtlsImage(VistaImage):
def __init__(self, *args, **kwargs):
super(CfhtlsImage, self).__init__(*args, **kwargs)
self.name = 'CFHTLSImage: %s %s' % (self.ccdname, self.band)
self.wtfn = self.wtfn.replace('.weight.fits', '_weight.fits')
# Use MAD to scale the weight maps...?
def read_invvar(self, clip=True, **kwargs):
iv = super(CfhtlsImage, self).read_invvar(clip=clip, **kwargs)
img = super(CfhtlsImage, self).read_image(header=False, **kwargs)
chi = img * np.sqrt(iv)
# MAD of chi map...
slice1 = (slice(0,-5,10),slice(0,-5,10))
slice2 = (slice(5,None,10),slice(5,None,10))
diff = np.abs(chi[slice1] - chi[slice2])
diff = diff[(iv[slice1] > 0) * (iv[slice2] > 0)]
mad = np.median(diff.ravel())
sig1 = 1.4826 * mad / np.sqrt(2.)
print('MAD of chi map:', sig1)
iv /= sig1**2
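        # Rescale the shipped weight map so that the chi = img * sqrt(iv)
        # pixels come out with unit scatter under the MAD estimate above.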
return iv
class CfhtlsSurveyData(LegacySurveyData):
def find_file(self, filetype, **kwargs):
if filetype == 'ccds':
basedir = self.survey_dir
return [os.path.join(basedir, 'survey-ccds-cfhtls.fits.gz')]
return super(CfhtlsSurveyData, self).find_file(filetype, **kwargs)
def sed_matched_filters(self, bands):
# single-band filters
SEDs = []
for i,band in enumerate(bands):
sed = np.zeros(len(bands))
sed[i] = 1.
SEDs.append((band, sed))
if len(bands) > 1:
gri = dict(g=1., r=1., i=1., u=0., z=0.)
SEDs.append(('gri', [gri[b] for b in bands]))
#red = dict(u=0., g=2.5, r=1., i=0.4, z=0.4)
#SEDs.append(('Red', [red[b] for b in bands]))
return SEDs
# Hackily defined RA,Dec values that split the ACS 7x7 tiling into
# distinct 'bricks'
rasplits = np.array([ 149.72716429, 149.89394223, 150.06073352,
150.22752888, 150.39431559, 150.56110037])
decsplits = np.array([ 1.79290318, 1.95956698, 2.12623253,
2.2929002, 2.45956215, 2.62621403])
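# read_acs_catalogs() below uses these splits to trim each tile's catalog to
# a unique (brick_primary) region, so overlapping tiles are not double-counted.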
def read_acs_catalogs():
# Read all ACS catalogs
mfn = 'euclid-out/merged-catalog.fits'
if not os.path.exists(mfn):
TT = []
fns = glob('euclid-out/tractor/*/tractor-*.fits')
for fn in fns:
T = fits_table(fn)
print(len(T), 'from', fn)
mra = np.median(T.ra)
mdec = np.median(T.dec)
print(np.sum(T.brick_primary), 'PRIMARY')
I = np.flatnonzero(rasplits > mra)
if len(I) > 0:
T.brick_primary &= (T.ra < rasplits[I[0]])
print(np.sum(T.brick_primary), 'PRIMARY after RA high cut')
I = np.flatnonzero(rasplits < mra)
if len(I) > 0:
T.brick_primary &= (T.ra >= rasplits[I[-1]])
print(np.sum(T.brick_primary), 'PRIMARY after RA low cut')
I = np.flatnonzero(decsplits > mdec)
if len(I) > 0:
T.brick_primary &= (T.dec < decsplits[I[0]])
print(np.sum(T.brick_primary), 'PRIMARY after DEC high cut')
I = np.flatnonzero(decsplits < mdec)
if len(I) > 0:
T.brick_primary &= (T.dec >= decsplits[I[-1]])
print(np.sum(T.brick_primary), 'PRIMARY after DEC low cut')
TT.append(T)
T = merge_tables(TT)
del TT
T.writeto(mfn)
else:
T = fits_table(mfn)
return T
def get_survey(survey_dir='euclid', outdir='euclid-out'):
survey = LegacySurveyData(survey_dir=survey_dir, output_dir=outdir)
survey.image_typemap['acs-vis'] = AcsVisImage
survey.image_typemap['megacam'] = MegacamImage
survey.image_typemap['vista'] = VistaImage
survey.image_typemap['cfhtls'] = CfhtlsImage
return survey
def get_exposures_in_list(fn):
expnums = []
    with open(fn, 'r') as f:
        for line in f:
            words = line.split()
            e = int(words[0], 10)
            expnums.append(e)
return expnums
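# Assumed .lst format (inferred from the parser above): each line starts with
# an integer exposure number, optionally followed by other columns, e.g.
#
#     962810  ...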
def download():
fns = glob('euclid/images/megacam/lists/*.lst')
expnums = set()
for fn in fns:
expnums.update(get_exposures_in_list(fn))
print(len(expnums), 'unique exposure numbers')
for expnum in expnums:
need = []
fn = 'euclid/images/megacam/%ip.fits' % expnum
if not os.path.exists(fn):
print('Missing:', fn)
need.append(fn)
psffn = fn.replace('p.fits', 'p_psfex.psf')
if not os.path.exists(psffn):
print('Missing:', psffn)
need.append(psffn)
wtfn = fn.replace('p.fits', 'p_weight.fits')
if not os.path.exists(wtfn):
print('Missing:', wtfn)
need.append(wtfn)
dqfn = fn.replace('p.fits', 'p_flag.fits')
if not os.path.exists(dqfn):
print('Missing:', dqfn)
need.append(dqfn)
bands = ['u','g','r','i2','z']
for band in bands:
for fn in need:
url = 'http://limu.cfht.hawaii.edu/COSMOS-Tractor/FITS/MegaCam/%s/%s' % (band, os.path.basename(fn))
cmd = '(cd euclid/images/megacam && wget -c "%s")' % url
print(cmd)
os.system(cmd)
def queue_list(opt):
survey = get_survey()
ccds = survey.get_ccds_readonly()
ccds.index = np.arange(len(ccds))
if opt.name is None:
opt.name = os.path.basename(opt.queue_list).replace('.lst','')
allccds = []
expnums = get_exposures_in_list(opt.queue_list)
for e in expnums:
ccds = survey.find_ccds(expnum=e)
allccds.append(ccds)
for ccd in ccds:
print(ccd.index, '%i-%s' % (ccd.expnum, ccd.ccdname))
allccds = merge_tables(allccds)
# Now group by CCDname and print out sets of indices to run at once
for name in np.unique(allccds.ccdname):
I = np.flatnonzero(allccds.ccdname == name)
print(','.join(['%i'%i for i in allccds.index[I]]),
'%s-%s' % (opt.name, name))
def analyze(opt):
fn = opt.analyze
expnums = get_exposures_in_list(fn)
print(len(expnums), 'exposures')
name = os.path.basename(fn).replace('.lst','')
print('Image list name:', name)
#for ccd in range(36):
for ccd in [0]:
fn = os.path.join('euclid-out', 'forced',
'megacam-%s-ccd%02i.fits' % (name, ccd))
print('Reading', fn)
T = fits_table(fn)
print(len(T), 'from', fn)
fluxes = np.unique([c for c in T.columns()
if c.startswith('flux_') and
not c.startswith('flux_ivar_')])
print('Fluxes:', fluxes)
assert(len(fluxes) == 1)
fluxcol = fluxes[0]
print('Flux column:', fluxcol)
T.rename(fluxcol, 'flux')
T.rename(fluxcol.replace('flux_', 'flux_ivar_'), 'flux_ivar')
T.allflux = np.zeros((len(T), len(expnums)), np.float32)
T.allflux_ivar = np.zeros((len(T), len(expnums)), np.float32)
T.allx = np.zeros((len(T), len(expnums)), np.float32)
T.ally = np.zeros((len(T), len(expnums)), np.float32)
TT = []
for i,expnum in enumerate(expnums):
fn = os.path.join('euclid-out', 'forced',
'megacam-%i-ccd%02i.fits' % (expnum, ccd))
print('Reading', fn)
t = fits_table(fn)
t.iexp = np.zeros(len(t), np.uint8) + i
t.rename(fluxcol, 'flux')
t.rename(fluxcol.replace('flux_', 'flux_ivar_'), 'flux_ivar')
print(len(t), 'from', fn)
# Edge effects...
W,H = 2112, 4644
margin = 30
t.cut((t.x > margin) * (t.x < W-margin) *
(t.y > margin) * (t.y < H-margin))
print('Keeping', len(t), 'not close to the edges')
TT.append(t)
TT = merge_tables(TT)
imap = dict([((n,o),i) for i,(n,o) in enumerate(zip(T.brickname,
T.objid))])
TT.index = np.array([imap[(n,o)]
for n,o in zip(TT.brickname, TT.objid)])
T.allflux [TT.index, TT.iexp] = TT.flux
T.allflux_ivar[TT.index, TT.iexp] = TT.flux_ivar
T.allx [TT.index, TT.iexp] = TT.x
T.ally [TT.index, TT.iexp] = TT.y
T.cflux_ivar = np.sum(T.allflux_ivar, axis=1)
T.cflux = np.sum(T.allflux * T.allflux_ivar, axis=1) / T.cflux_ivar
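        # Inverse-variance weighted mean of the per-exposure fluxes; this is
        # the optimal combination if the per-exposure errors are independent.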
ps = PlotSequence('euclid')
plt.clf()
ha = dict(range=(0, 1600), bins=100, histtype='step')
plt.hist(T.flux_ivar, color='k', **ha)
for i in range(len(expnums)):
plt.hist(T.allflux_ivar[:,i], **ha)
plt.xlabel('Invvar')
ps.savefig()
I = np.flatnonzero(T.flux_ivar > 0)
tf = np.repeat(T.flux [:,np.newaxis], len(expnums), axis=1)
tiv = np.repeat(T.flux_ivar[:,np.newaxis], len(expnums), axis=1)
J = np.flatnonzero((T.allflux_ivar * tiv) > 0)
plt.clf()
plt.plot(tf.flat[J], T.allflux.flat[J], 'b.', alpha=0.1)
plt.xlabel('Image set flux')
plt.ylabel('Exposure fluxes')
plt.xscale('symlog')
plt.yscale('symlog')
plt.title(name)
plt.axhline(0., color='k', alpha=0.1)
plt.axvline(0., color='k', alpha=0.1)
ax = plt.axis()
plt.plot([-1e6, 1e6], [-1e6, 1e6], 'k-', alpha=0.1)
plt.axis(ax)
ps.savefig()
# Outliers... turned out to be sources on the edges.
# J = np.flatnonzero(((T.allflux_ivar * tiv) > 0) *
# (np.abs(T.allflux) > 1.) *
# (np.abs(tf) < 1.))
# print('Plotting', len(J), 'with sig. diff')
# plt.clf()
# plt.scatter(T.allx.flat[J], T.ally.flat[J],
# c=(tf.flat[J] - T.allflux.flat[J]), alpha=0.1)
# plt.title(name)
# ps.savefig()
plt.clf()
plt.plot(T.flux[I], T.cflux[I], 'b.', alpha=0.1)
plt.xlabel('Image set flux')
plt.ylabel('Summed exposure fluxes')
plt.xscale('symlog')
plt.yscale('symlog')
plt.title(name)
plt.axhline(0., color='k', alpha=0.1)
plt.axvline(0., color='k', alpha=0.1)
ax = plt.axis()
plt.plot([-1e6, 1e6], [-1e6, 1e6], 'k-', alpha=0.1)
plt.axis(ax)
ps.savefig()
plt.clf()
plt.plot(T.flux_ivar[:,np.newaxis], T.allflux_ivar, 'b.', alpha=0.1)
plt.xlabel('Image set flux invvar')
plt.ylabel('Exposure flux invvars')
plt.title(name)
ps.savefig()
plt.clf()
plt.plot(T.flux_ivar, T.cflux_ivar, 'b.', alpha=0.1)
plt.xlabel('Image set flux invvar')
plt.ylabel('Summed exposure flux invvar')
plt.title(name)
ax = plt.axis()
plt.plot([-1e6, 1e6], [-1e6, 1e6], 'k-', alpha=0.1)
plt.axis(ax)
ps.savefig()
K = np.flatnonzero((T.flux_ivar * T.cflux_ivar) > 0)
plt.clf()
#plt.plot(T.flux_ivar, T.cflux_ivar, 'b.', alpha=0.1)
loghist(T.flux_ivar[K], T.cflux_ivar[K], 200)
plt.xlabel('Image set flux invvar')
plt.ylabel('Summed exposure flux invvar')
plt.title(name)
ps.savefig()
plt.clf()
loghist(T.flux[K] * np.sqrt(T.flux_ivar[K]),
T.cflux[K] * np.sqrt(T.cflux_ivar[K]), 200,
range=[[-5,25]]*2)
plt.xlabel('Image set S/N')
plt.ylabel('Summed exposures S/N')
plt.title(name)
ps.savefig()
ACS = read_acs_catalogs()
imap = dict([((n,o),i) for i,(n,o) in enumerate(zip(ACS.brickname,
ACS.objid))])
T.index = np.array([imap[(n,o)]
for n,o in zip(T.brickname, T.objid)])
ACS.cut(T.index)
plt.clf()
plt.plot(ACS.decam_flux, T.cflux, 'b.', alpha=0.1)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlabel('ACS flux (I)')
plt.ylabel('CFHT flux (i)')
plt.title(name)
plt.plot([0,1e4],[0,1e4], 'k-', alpha=0.1)
plt.ylim(-1, 1e4)
ps.savefig()
ACS.mag = -2.5 * (np.log10(ACS.decam_flux) - 9)
T.cmag = -2.5 * (np.log10(T.cflux) - 9)
plt.clf()
plt.plot(ACS.mag, T.cmag, 'b.', alpha=0.1)
plt.xlabel('ACS I (mag)')
plt.ylabel('Summed CFHT i (mag)')
plt.title(name)
plt.plot([0,1e4],[0,1e4], 'k-', alpha=0.5)
plt.axis([27, 16, 30, 15])
ps.savefig()
def analyze2(opt):
# Analyze forced photometry results
ps = PlotSequence('euclid')
forcedfn = 'forced-megacam.fits'
if os.path.exists(forcedfn):
F = fits_table(forcedfn)
else:
T = read_acs_catalogs()
print(len(T), 'ACS catalog entries')
#T.cut(T.brick_primary)
#print(len(T), 'primary')
# read Megacam forced-photometry catalogs
fns = glob('euclid-out/forced/megacam-*.fits')
fns.sort()
F = []
for fn in fns:
f = fits_table(fn)
print(len(f), 'from', fn)
fn = os.path.basename(fn)
fn = fn.replace('.fits','')
words = fn.split('-')
assert(words[0] == 'megacam')
expnum = int(words[1], 10)
ccdname = words[2]
f.expnum = np.array([expnum]*len(f))
f.ccdname = np.array([ccdname]*len(f))
F.append(f)
F = merge_tables(F)
objmap = dict([((brickname,objid),i) for i,(brickname,objid) in
enumerate(zip(T.brickname, T.objid))])
I = np.array([objmap[(brickname, objid)] for brickname,objid
in zip(F.brickname, F.objid)])
F.type = T.type[I]
F.acs_flux = T.decam_flux[I]
F.ra = T.ra[I]
F.dec = T.dec[I]
F.bx = T.bx[I]
F.by = T.by[I]
F.writeto(forcedfn)
print(len(F), 'forced photometry measurements')
# There's a great big dead zone around the outside of the image...
# roughly X=[0 to 33] and X=[2080 to 2112] and Y=[4612..]
J = np.flatnonzero((F.x > 40) * (F.x < 2075) * (F.y < 4600))
F.cut(J)
print('Cut out edges:', len(F))
F.fluxsn = F.flux * np.sqrt(F.flux_ivar)
F.mag = -2.5 * (np.log10(F.flux) - 9.)
I = np.flatnonzero(F.type == 'PSF ')
print(len(I), 'PSF')
for t in ['SIMP', 'DEV ', 'EXP ', 'COMP']:
J = np.flatnonzero(F.type == t)
print(len(J), t)
S = np.flatnonzero(F.type == 'SIMP')
print(len(S), 'SIMP')
S = F[S]
plt.clf()
plt.semilogx(F.fluxsn, F.mag, 'k.', alpha=0.1)
plt.semilogx(F.fluxsn[I], F.mag[I], 'r.', alpha=0.1)
plt.semilogx(S.fluxsn, S.mag, 'b.', alpha=0.1)
plt.xlabel('Megacam forced-photometry Flux S/N')
plt.ylabel('Megacam forced-photometry mag')
#plt.xlim(1., 1e5)
#plt.ylim(10, 26)
plt.xlim(1., 1e4)
plt.ylim(16, 26)
J = np.flatnonzero((F.fluxsn[I] > 4.5) * (F.fluxsn[I] < 5.5))
print(len(J), 'between flux S/N 4.5 and 5.5')
J = I[J]
medmag = np.median(F.mag[J])
print('Median mag', medmag)
plt.axvline(5., color='r', alpha=0.5, lw=2)
plt.axvline(5., color='k', alpha=0.5)
plt.axhline(medmag, color='r', alpha=0.5, lw=2)
plt.axhline(medmag, color='k', alpha=0.5)
J = np.flatnonzero((S.fluxsn > 4.5) * (S.fluxsn < 5.5))
print(len(J), 'SIMP between flux S/N 4.5 and 5.5')
medmag = np.median(S.mag[J])
print('Median mag', medmag)
plt.axhline(medmag, color='b', alpha=0.5, lw=2)
plt.axhline(medmag, color='k', alpha=0.5)
plt.title('Megacam forced-photometered from ACS-VIS')
ax = plt.axis()
p1 = plt.semilogx([0],[0], 'k.')
p2 = plt.semilogx([0], [0], 'r.')
p3 = plt.semilogx([0], [0], 'b.')
plt.legend((p1[0], p2[0], p3[0]),
('All sources', 'Point sources',
'"Simple" galaxies'))
plt.axis(ax)
ps.savefig()
def analyze_vista(opt):
# Analyze VISTA forced photometry results
ps = PlotSequence('vista')
survey = get_survey()
ccds = survey.get_ccds_readonly()
ccds.cut(ccds.camera == 'vista')
print(len(ccds), 'VISTA images')
for ccd in ccds:
im = survey.get_image_object(ccd)
tim = im.get_tractor_image(pixPsf=True, slc=(slice(0,2000), slice(0,2000)))
img = tim.getImage()
ie = tim.getInvError()
sig1 = tim.sig1
medians = True
# plot_correlations from legacyanalysis/check-noise.py
corrs = []
corrs_x = []
corrs_y = []
mads = []
mads_x = []
mads_y = []
rcorrs = []
rcorrs_x = []
rcorrs_y = []
dists = np.arange(1, 51)
for dist in dists:
offset = dist
slice1 = (slice(0,-offset,1),slice(0,-offset,1))
slice2 = (slice(offset,None,1),slice(offset,None,1))
slicex = (slice1[0], slice2[1])
slicey = (slice2[0], slice1[1])
corr = img[slice1] * img[slice2]
corr = corr[(ie[slice1] > 0) * (ie[slice2] > 0)]
diff = img[slice1] - img[slice2]
diff = diff[(ie[slice1] > 0) * (ie[slice2] > 0)]
sig1 = 1.4826 / np.sqrt(2.) * np.median(np.abs(diff).ravel())
mads.append(sig1)
#print('Dist', dist, '; number of corr pixels', len(corr))
#t0 = Time()
if medians:
rcorr = np.median(corr) / sig1**2
rcorrs.append(rcorr)
#t1 = Time()
corr = np.mean(corr) / sig1**2
#t2 = Time()
#print('median:', t1-t0)
#print('mean :', t2-t1)
corrs.append(corr)
corr = img[slice1] * img[slicex]
corr = corr[(ie[slice1] > 0) * (ie[slicex] > 0)]
diff = img[slice1] - img[slicex]
diff = diff[(ie[slice1] > 0) * (ie[slicex] > 0)]
sig1 = 1.4826 / np.sqrt(2.) * np.median(np.abs(diff).ravel())
mads_x.append(sig1)
if medians:
rcorr = np.median(corr) / sig1**2
rcorrs_x.append(rcorr)
corr = np.mean(corr) / sig1**2
corrs_x.append(corr)
corr = img[slice1] * img[slicey]
corr = corr[(ie[slice1] > 0) * (ie[slicey] > 0)]
diff = img[slice1] - img[slicey]
diff = diff[(ie[slice1] > 0) * (ie[slicey] > 0)]
#Nmad = len(diff)
sig1 = 1.4826 / np.sqrt(2.) * np.median(np.abs(diff).ravel())
mads_y.append(sig1)
if medians:
rcorr = np.median(corr) / sig1**2
rcorrs_y.append(rcorr)
corr = np.mean(corr) / sig1**2
corrs_y.append(corr)
#print('Dist', dist, '-> corr', corr, 'X,Y', corrs_x[-1], corrs_y[-1],
# 'robust', rcorrs[-1], 'X,Y', rcorrs_x[-1], rcorrs_y[-1])
pix = img[ie > 0].ravel()
        Nmad = len(pix) // 2  # integer division: Nmad is used as an array index below
P = np.random.permutation(len(pix))[:(Nmad*2)]
diff = pix[P[:Nmad]] - pix[P[Nmad:]]
mad_random = 1.4826 / np.sqrt(2.) * np.median(np.abs(diff))
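        # Randomly paired pixels share no spatial correlation, so mad_random
        # serves as the uncorrelated-noise baseline for the curves below.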
plt.clf()
p1 = plt.plot(dists, corrs, 'b.-')
p2 = plt.plot(dists, corrs_x, 'r.-')
p3 = plt.plot(dists, corrs_y, 'g.-')
if medians:
p4 = plt.plot(dists, rcorrs, 'b.--')
p5 = plt.plot(dists, rcorrs_x, 'r.--')
p6 = plt.plot(dists, rcorrs_y, 'g.--')
plt.xlabel('Pixel offset')
plt.ylabel('Correlation')
plt.axhline(0, color='k', alpha=0.3)
plt.legend([p1[0],p2[0],p3[0]], ['Diagonal', 'X', 'Y'], loc='upper right')
plt.title('VISTA ' + tim.name)
ps.savefig()
plt.clf()
p4 = plt.plot(dists, mads, 'b.-')
p5 = plt.plot(dists, mads_x, 'r.-')
p6 = plt.plot(dists, mads_y, 'g.-')
plt.xlabel('Pixel offset')
plt.ylabel('MAD error estimate')
#plt.axhline(0, color='k', alpha=0.3)
p7 = plt.axhline(mad_random, color='k', alpha=0.3)
plt.legend([p4[0],p5[0],p6[0], p7], ['Diagonal', 'X', 'Y', 'Random'],
loc='lower right')
plt.title('VISTA ' + tim.name)
ps.savefig()
for band in ['Y','J','H']:
forcedfn = 'euclid-out/forced-vista-%s.fits' % band
F = fits_table(forcedfn)
F.rename('flux_%s' % band.lower(), 'flux')
F.rename('flux_ivar_%s' % band.lower(), 'flux_ivar')
T = read_acs_catalogs()
print(len(T), 'ACS catalog entries')
objmap = dict([((brickname,objid),i) for i,(brickname,objid) in
enumerate(zip(T.brickname, T.objid))])
I = np.array([objmap[(brickname, objid)] for brickname,objid
in zip(F.brickname, F.objid)])
F.type = T.type[I]
F.acs_flux = T.decam_flux[I]
F.ra = T.ra[I]
F.dec = T.dec[I]
F.bx = T.bx[I]
F.by = T.by[I]
#F.writeto(forcedfn)
print(len(F), 'forced photometry measurements')
F.fluxsn = F.flux * np.sqrt(F.flux_ivar)
F.mag = -2.5 * (np.log10(F.flux) - 9.)
I = np.flatnonzero(F.type == 'PSF ')
print(len(I), 'PSF')
for t in ['SIMP', 'DEV ', 'EXP ', 'COMP']:
J = np.flatnonzero(F.type == t)
print(len(J), t)
S = np.flatnonzero(F.type == 'SIMP')
print(len(S), 'SIMP')
S = F[S]
plt.clf()
plt.semilogx(F.fluxsn, F.mag, 'k.', alpha=0.1)
plt.semilogx(F.fluxsn[I], F.mag[I], 'r.', alpha=0.1)
plt.semilogx(S.fluxsn, S.mag, 'b.', alpha=0.1)
plt.xlabel('Vista forced-photometry Flux S/N')
plt.ylabel('Vista forced-photometry mag')
#plt.xlim(1., 1e5)
#plt.ylim(10, 26)
plt.xlim(1., 1e4)
plt.ylim(18, 28)
J = np.flatnonzero((F.fluxsn[I] > 4.5) * (F.fluxsn[I] < 5.5))
print(len(J), 'between flux S/N 4.5 and 5.5')
J = I[J]
medmag = np.median(F.mag[J])
print('Median mag', medmag)
plt.axvline(5., color='r', alpha=0.5, lw=2)
plt.axvline(5., color='k', alpha=0.5)
plt.axhline(medmag, color='r', alpha=0.5, lw=2)
plt.axhline(medmag, color='k', alpha=0.5)
J = np.flatnonzero((S.fluxsn > 4.5) * (S.fluxsn < 5.5))
print(len(J), 'SIMP between flux S/N 4.5 and 5.5')
medmag = np.median(S.mag[J])
print('Median mag', medmag)
plt.axhline(medmag, color='b', alpha=0.5, lw=2)
plt.axhline(medmag, color='k', alpha=0.5)
plt.title('VISTA %s forced-photometered from ACS-VIS' % band)
ax = plt.axis()
p1 = plt.semilogx([0],[0], 'k.')
p2 = plt.semilogx([0], [0], 'r.')
p3 = plt.semilogx([0], [0], 'b.')
plt.legend((p1[0], p2[0], p3[0]),
('All sources', 'Point sources',
'"Simple" galaxies'))
plt.axis(ax)
ps.savefig()
def analyze3(opt):
ps = PlotSequence('euclid')
name1s = ['g.SNR12-MIQ', 'i.SNR10-MIQ', 'r.SNR10-HIQ',
'u.SNR10-HIQ', 'z.SNR09-ALL', ]
name2s = [
['g.SNR10-MIQ', 'g.SNR10-BIQ', 'g.SNR09-LIQ', 'g.PC.Shallow'],
['i.SNR12-LIQ', 'i.SNR10-LIQ', 'i.SNR09-HIQ', 'i.PC.Shallow'],
['r.SNR12-LIQ', 'r.SNR10-LIQ', 'r.SNR08-MIQ', 'r.PC.Shallow'],
['u.SNR10-MIQ', 'u.SNR08-PIQ', 'u.SNR07-HIQ'],
['z.SNR08-BIQ', 'z.SNR06-LIQ', 'z.SNR06-HIQ', 'z.PC.Shallow'],
]
allnames = []
for n1,n2 in zip(name1s, name2s):
allnames.append(n1)
allnames.extend(n2)
for name in allnames:
F = fits_table('euclid-out/forced-%s.fits' % name)
F.fluxsn = F.flux * np.sqrt(F.flux_ivar)
F.mag = -2.5 * (np.log10(F.flux) - 9.)
I = np.flatnonzero(F.type == 'PSF ')
print(len(I), 'PSF')
P = F[I]
I = np.flatnonzero(F.type == 'SIMP')
print(len(I), 'SIMP')
S = F[I]
plt.clf()
plt.semilogx(F.fluxsn, F.mag, 'k.', alpha=0.01)
plt.semilogx(P.fluxsn, P.mag, 'r.', alpha=0.01)
plt.semilogx(S.fluxsn, S.mag, 'b.', alpha=0.01)
plt.xlabel('Megacam forced-photometry Flux S/N')
plt.ylabel('Megacam forced-photometry mag')
#plt.xlim(1., 1e4)
#plt.ylim(16, 26)
plt.xlim(1., 100.)
plt.ylim(22., 28.)
J = np.flatnonzero((P.fluxsn > 4.5) * (P.fluxsn < 5.5))
print(len(J), 'between flux S/N 4.5 and 5.5')
medmag_psf = np.median(P.mag[J])
print('Median mag', medmag_psf)
plt.axvline(5., color='r', alpha=0.5, lw=2)
plt.axvline(5., color='k', alpha=0.5)
plt.axhline(medmag_psf, color='r', alpha=0.5, lw=2)
plt.axhline(medmag_psf, color='k', alpha=0.5)
J = np.flatnonzero((S.fluxsn > 4.5) * (S.fluxsn < 5.5))
print(len(J), 'SIMP between flux S/N 4.5 and 5.5')
medmag_simp = np.median(S.mag[J])
print('Median mag', medmag_simp)
plt.axhline(medmag_simp, color='b', alpha=0.5, lw=2)
plt.axhline(medmag_simp, color='k', alpha=0.5)
plt.title('Megacam forced-photometered from ACS-VIS: %s' % name)
ax = plt.axis()
p1 = plt.semilogx([0],[0], 'k.')
p2 = plt.semilogx([0], [0], 'r.')
p3 = plt.semilogx([0], [0], 'b.')
plt.legend((p1[0], p2[0], p3[0]),
('All sources',
'Point sources (~%.2f @ 5 sigma)' % medmag_psf,
'"Simple" galaxies (~%.2f @ 5 sigma)' % medmag_simp))
plt.axis(ax)
ps.savefig()
for name1,N2 in zip(name1s, name2s):
T1 = fits_table('euclid-out/forced-%s.fits' % name1)
for name2 in N2:
T2 = fits_table('euclid-out/forced-%s.fits' % name2)
assert(len(T2) == len(T1))
I = np.flatnonzero((T1.flux_ivar > 0) & (T2.flux_ivar > 0))
plt.clf()
plt.plot(T1.flux[I], T2.flux[I], 'k.')
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlabel(name1 + ' flux')
plt.ylabel(name2 + ' flux')
ps.savefig()
def analyze4(opt):
ps = PlotSequence('euclid')
names = ['r.SNR10-HIQ',
'r.SNR12-LIQ', 'r.SNR10-LIQ', 'r.SNR08-MIQ', 'r.PC.Shallow']
exps = [get_exposures_in_list('euclid/images/megacam/lists/%s.lst' % n)
for n in names]
name_to_exps = dict(zip(names, exps))
uexps = np.unique(np.hstack(exps))
print('Unique exposures:', uexps)
e1 = uexps[0]
e2 = uexps[1]
for e1,e2 in zip(uexps[::2], uexps[1::2]):
TT1 = []
TT2 = []
plt.clf()
for ccd in range(36):
fn1 = os.path.join('euclid-out', 'forced',
'megacam-%s-ccd%02i.fits' % (e1, ccd))
fn2 = os.path.join('euclid-out', 'forced',
'megacam-%s-ccd%02i.fits' % (e2, ccd))
T1 = fits_table(fn1)
T2 = fits_table(fn2)
print(len(T1), 'from', fn1)
print(len(T2), 'from', fn2)
imap = dict([((n,o),i) for i,(n,o) in enumerate(zip(T1.brickname,
T1.objid))])
imatch = np.array([imap.get((n,o), -1) for n,o in zip(T2.brickname, T2.objid)])
T2.cut(imatch >= 0)
T1.cut(imatch[imatch >= 0])
print(len(T1), 'matched')
I = np.flatnonzero((T1.flux_ivar_r > 0) * (T2.flux_ivar_r > 0))
T1.cut(I)
T2.cut(I)
print(len(T1), 'good')
TT1.append(T1)
TT2.append(T2)
n,b,p = plt.hist((T1.flux_r - T2.flux_r) / np.sqrt(1./T1.flux_ivar_r + 1./T2.flux_ivar_r),
range=(-5, 5), bins=100,
histtype='step', color='k', label='All sources', normed=True)
binc = (b[1:] + b[:-1])/2.
yy = np.exp(-0.5 * binc**2)
p1 = plt.plot(binc, yy / yy.sum() * np.sum(n), 'k-', alpha=0.5, lw=2,
label='N(0,1)')
yy = np.exp(-0.5 * binc**2 / (1.2**2))
p2 = plt.plot(binc, yy / yy.sum() * np.sum(n), 'k--', alpha=0.5, lw=2,
label='N(0,1.2)')
plt.xlabel('Flux difference / Flux errors (sigma)')
plt.xlim(-5,5)
plt.title('Forced phot differences: Megacam r %s to %s' % (e1, e2))
ps.savefig()
T1 = merge_tables(TT1)
T2 = merge_tables(TT2)
plt.clf()
n,b,p = plt.hist((T1.flux_r - T2.flux_r) / np.sqrt(1./T1.flux_ivar_r + 1./T2.flux_ivar_r),
range=(-5, 5), bins=100,
histtype='step', color='k', label='All sources', normed=True)
binc = (b[1:] + b[:-1])/2.
yy = np.exp(-0.5 * binc**2)
plt.plot(binc, yy / yy.sum() * np.sum(n), 'k-', alpha=0.5, lw=2,
label='N(0,1)')
yy = np.exp(-0.5 * binc**2 / (1.2**2))
p2 = plt.plot(binc, yy / yy.sum() * np.sum(n), 'k--', alpha=0.5, lw=2,
label='N(0,1.2)')
#for magcut in [21,23]:
for magcut in [23]:
meanmag = -2.5 * (np.log10((T1.flux_r + T2.flux_r)/2.) - 9)
I = np.flatnonzero(meanmag < magcut)
plt.hist((T1.flux_r[I] - T2.flux_r[I]) /
np.sqrt(1./T1.flux_ivar_r[I] + 1./T2.flux_ivar_r[I]),
range=(-5, 5), bins=100,
histtype='step', label='mag < %g' % magcut, normed=True)
plt.xlabel('Flux difference / Flux errors (sigma)')
plt.xlim(-5,5)
#plt.title('Forced phot differences: Megacam r %s to %s, ccd%02i' % (e1, e2, ccd))
plt.title('Forced phot differences: Megacam r %s to %s' % (e1, e2))
plt.legend(loc='upper right')
ps.savefig()
T1.mag = -2.5 * (np.log10(T1.flux_r) - 9)
T2.mag = -2.5 * (np.log10(T2.flux_r) - 9)
plt.clf()
ha = dict(range=(20,26), bins=100, histtype='step')
plt.hist(T1.mag, color='b', **ha)
plt.hist(T2.mag, color='r', **ha)
plt.xlabel('Mag')
plt.title('Forced phot: Megacam r %s, %s, ccd%02i' % (e1, e2, ccd))
ps.savefig()
break
from collections import Counter
#name1 = names[0]
#names2 = names[1:]
for iname1,name1 in enumerate(names):
for name2 in names[iname1+1:]:
#T1 = fits_table('euclid-out/forced-%s.fits' % name1)
#T2 = fits_table('euclid-out/forced-%s.fits' % name2)
T1 = fits_table('euclid-out/forced2-%s.fits' % name1)
T2 = fits_table('euclid-out/forced2-%s.fits' % name2)
#print('T1 nexp:', Counter(T1.nexp).most_common())
#print('T2 nexp:', Counter(T2.nexp).most_common())
mexp1 = np.median(T1.nexp[T1.nexp > 0])
mexp2 = np.median(T2.nexp[T2.nexp > 0])
I = np.flatnonzero((T1.nexp >= mexp1) * (T2.nexp >= mexp2))
T1.cut(I)
T2.cut(I)
print(len(T1), 'sources kept')
plt.clf()
n,b,p = plt.hist((T1.flux - T2.flux) /
np.sqrt(1./T1.flux_ivar + 1./T2.flux_ivar),
range=(-5, 5), bins=100,
histtype='step', color='k', label='All sources', normed=True)
# for magcut in [23, 24, 25]:
# meanmag = -2.5 * (np.log10((T1.flux + T2.flux)/2.) - 9)
# I = np.flatnonzero(meanmag < magcut)
# plt.hist((T1.flux[I] - T2.flux[I]) /
# np.sqrt(1./T1.flux_ivar[I] + 1./T2.flux_ivar[I]),
# range=(-5, 5), bins=100,
# histtype='step', label='mag < %g' % magcut, normed=True)
for maglo, maghi in [(18,23), (23,24), (24,25)]:
meanmag = -2.5 * (np.log10((T1.flux + T2.flux)/2.) - 9)
I = np.flatnonzero((meanmag > maglo) * (meanmag <= maghi))
plt.hist((T1.flux[I] - T2.flux[I]) /
np.sqrt(1./T1.flux_ivar[I] + 1./T2.flux_ivar[I]),
range=(-5, 5), bins=100,
histtype='step', label='mag %g to %g' % (maglo,maghi),
normed=True)
binc = (b[1:] + b[:-1])/2.
yy = np.exp(-0.5 * binc**2)
plt.plot(binc, yy / yy.sum() * np.sum(n), 'k-', alpha=0.5, lw=2,
label='N(0,1)')
yy = np.exp(-0.5 * binc**2 / (1.2**2))
plt.plot(binc, yy / yy.sum() * np.sum(n), 'k--', alpha=0.5, lw=2,
label='N(0,1.2)')
yy = np.exp(-0.5 * binc**2 / (1.25**2))
plt.plot(binc, yy / yy.sum() * np.sum(n), 'k-',
label='N(0,1.25)')
exps1 = set(name_to_exps[name1])
exps2 = set(name_to_exps[name2])
print(name1, 'has', len(exps1))
print(name2, 'has', len(exps2))
incommon = len(exps1.intersection(exps2))
print(incommon, 'in common')
plt.legend(loc='upper right')
plt.xlabel('Flux difference / Flux errors (sigma)')
plt.xlim(-5,5)
#plt.title('Forced phot differences: Megacam %s to %s (%i exposures in common)' % (name1, name2, incommon))
plt.title('Forced phot differences: Megacam %s to %s' % (name1, name2))
ps.savefig()
# plt.clf()
# n,b,p = loghist(np.log(T1.flux),
# np.log(T2.flux),
# range=((-3,5),(-3,5)), bins=200)
# plt.xlabel('log Flux 1')
# plt.ylabel('log Flux 2')
# plt.title('Forced phot differences: Megacam %s to %s' % (name1, name2))
# ps.savefig()
# plt.clf()
# n,b,p = loghist(np.log((T1.flux + T2.flux)/2.),
# (T1.flux - T2.flux) /
# np.sqrt(1./T1.flux_ivar + 1./T2.flux_ivar),
# range=((-3,5),(-5,5)), bins=200)
# plt.axhline(0, color=(0.3,0.3,1.0))
# plt.xlabel('log average Flux')
# plt.ylabel('Flux difference / Flux errors (sigma)')
# plt.title('Forced phot differences: Megacam %s to %s' % (name1, name2))
# ps.savefig()
# T1.mag = -2.5 * (np.log10(T1.flux) - 9)
# T2.mag = -2.5 * (np.log10(T2.flux) - 9)
# plt.clf()
# ha = dict(range=(20,28), bins=100, histtype='step')
# plt.hist(T1.mag, color='b', **ha)
# plt.hist(T2.mag, color='r', **ha)
# plt.xlabel('Mag')
# plt.title('Forced phot: Megacam r %s, %s, ccd%02i' % (e1, e2, ccd))
# ps.savefig()
break
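# Editor's note: analyze4() above repeatedly histograms the "pull"
# (f1 - f2) / sqrt(1/iv1 + 1/iv2) and overlays Gaussians scaled to the
# histogram. A self-contained sketch of that overlay (argument names are
# illustrative; normed=True mirrors the older matplotlib API used above,
# which newer versions spell density=True):
def plot_pulls_sketch(flux1, ivar1, flux2, ivar2):
    import numpy as np
    import pylab as plt
    pulls = (flux1 - flux2) / np.sqrt(1./ivar1 + 1./ivar2)
    n,b,p = plt.hist(pulls, range=(-5, 5), bins=100, histtype='step',
                     color='k', label='All sources', normed=True)
    binc = (b[1:] + b[:-1]) / 2.
    for sigma, style in [(1.0, 'k-'), (1.2, 'k--')]:
        yy = np.exp(-0.5 * binc**2 / sigma**2)
        # Scale the curve so its sum matches the histogram's.
        plt.plot(binc, yy / yy.sum() * np.sum(n), style, alpha=0.5, lw=2,
                 label='N(0,%g)' % sigma)
    plt.legend(loc='upper right')
    plt.xlabel('Flux difference / Flux errors (sigma)')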
def geometry():
# Regular tiling with small overlaps; RA,Dec aligned
# In this dataset, the Megacam images are also very nearly all aligned.
survey = get_survey()
ccds = survey.get_ccds_readonly()
T = ccds[ccds.camera == 'acs-vis']
print(len(T), 'ACS CCDs')
plt.clf()
for ccd in T:
wcs = survey.get_approx_wcs(ccd)
h,w = wcs.shape
rr,dd = wcs.pixelxy2radec([1,w,w,1,1], [1,1,h,h,1])
plt.plot(rr, dd, 'b-')
T = ccds[ccds.camera == 'megacam']
print(len(T), 'Megacam CCDs')
for ccd in T:
wcs = survey.get_approx_wcs(ccd)
h,w = wcs.shape
rr,dd = wcs.pixelxy2radec([1,w,w,1,1], [1,1,h,h,1])
plt.plot(rr, dd, 'r-')
plt.savefig('ccd-outlines.png')
TT = []
fns = glob('euclid-out/tractor/*/tractor-*.fits')
for fn in fns:
T = fits_table(fn)
print(len(T), 'from', fn)
TT.append(T)
T = merge_tables(TT)
plt.clf()
plothist(T.ra, T.dec, 200)
plt.savefig('acs-sources1.png')
T = fits_table('euclid/survey-ccds-acsvis.fits.gz')
for ccd in T:
wcs = survey.get_approx_wcs(ccd)
h,w = wcs.shape
rr,dd = wcs.pixelxy2radec([1,w,w,1,1], [1,1,h,h,1])
plt.plot(rr, dd, 'b-')
plt.savefig('acs-sources2.png')
return 0
# It's a 7x7 grid... hackily define RA,Dec boundaries.
T = fits_table('euclid/survey-ccds-acsvis.fits.gz')
ras = T.ra.copy()
ras.sort()
decs = T.dec.copy()
decs.sort()
print('RAs:', ras)
print('Decs:', decs)
ras = ras.reshape((-1, 7)).mean(axis=1)
decs = decs.reshape((-1, 7)).mean(axis=1)
print('RAs:', ras)
print('Decs:', decs)
rasplits = (ras[:-1] + ras[1:])/2.
print('RA boundaries:', rasplits)
decsplits = (decs[:-1] + decs[1:])/2.
print('Dec boundaries:', decsplits)
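# Editor's note: a worked toy example of the boundary construction above.
# Sorting the pointing centers, averaging each group of k, then taking
# midpoints of adjacent group means yields the cell boundaries of a regular
# k x k tiling (here k=3; geometry() above uses k=7):
def grid_boundaries_sketch():
    import numpy as np
    centers = np.array([10., 10.1, 9.9, 20., 20.1, 19.9, 30., 30.1, 29.9])
    k = 3
    vals = np.sort(centers).reshape((-1, k)).mean(axis=1)  # [10., 20., 30.]
    return (vals[:-1] + vals[1:]) / 2.                     # [15., 25.]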
def stage_plot_model(tims=None, bands=None, targetwcs=None,
lanczos=None, survey=None, brickname=None, version_header=None,
mp=None, coadd_bw=None, **kwargs):
#ACS = read_acs_catalogs()
fn = survey.find_file('tractor', brick=brickname, output=True)
ACS = fits_table(fn)
print('Read', len(ACS), 'ACS catalog entries')
ACS.cut(ACS.brick_primary)
print('Cut to', len(ACS), 'primary')
ACS.cut(ACS.decam_flux_ivar > 0)
tim = tims[0]
#ok,xx,yy = tim.getWcs().wcs.radec2pixelxy(ACS.ra, ACS.dec)
ok,xx,yy = tim.subwcs.radec2pixelxy(ACS.ra, ACS.dec)
H,W = tim.shape
ACS.cut((xx >= 1.) * (xx < W) * (yy >= 1.) * (yy < H))
print('Cut to', len(ACS), 'in image')
print('Creating catalog objects...')
cat = read_fits_catalog(ACS, allbands=bands, bands=bands)
tr = Tractor(tims, cat)
# Clip sizes of big models
tim = tims[0]
for src in cat:
from tractor.galaxy import ProfileGalaxy
if isinstance(src, ProfileGalaxy):
px,py = tim.wcs.positionToPixel(src.getPosition())
h = src._getUnitFluxPatchSize(tim, px, py, tim.modelMinval)
MAXHALF = 128
if h > MAXHALF:
print('halfsize', h,'for',src,'-> setting to',MAXHALF)
src.halfsize = MAXHALF
print('Rendering mods...')
mods = list(tr.getModelImages())
print('Writing coadds...')
from legacypipe.coadds import write_coadd_images, make_coadds
C = make_coadds(tims, bands, targetwcs, mods=mods, lanczos=lanczos,
callback=write_coadd_images,
callback_args=(survey, brickname, version_header, tims, targetwcs),
plots=False, mp=mp)
coadd_list= [('image',C.coimgs,rgbkwargs),
('model', C.comods, rgbkwargs),
('resid', C.coresids, rgbkwargs_resid)]
for name,ims,rgbkw in coadd_list:
rgb = get_rgb(ims, bands, **rgbkw)
kwa = {}
if coadd_bw and len(bands) == 1:
rgb = rgb.sum(axis=2)
kwa = dict(cmap='gray')
with survey.write_output(name + '-jpeg', brick=brickname) as out:
imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
print('Wrote', out.fn)
del rgb
from legacypipe.runbrick import stage_tims, stage_mask_junk
from legacypipe.runbrick import stage_srcs, stage_fitblobs, stage_coadds, stage_writecat
def reduce_acs_image(opt, survey):
ccds = survey.get_ccds_readonly()
ccds.cut(ccds.camera == 'acs-vis')
print('Cut to', len(ccds), 'from ACS-VIS')
ccds.cut(ccds.expnum == opt.expnum)
print('Cut to', len(ccds), 'with expnum', opt.expnum)
allccds = ccds
prereqs_update = {'plot_model': 'mask_junk'}
from astrometry.util.stages import CallGlobalTime
stagefunc = CallGlobalTime('stage_%s', globals())
for iccd in range(len(allccds)):
# Process just this single CCD.
survey.ccds = allccds[np.array([iccd])]
ccd = survey.ccds[0]
brickname = 'acsvis-%03i' % ccd.expnum
run_brick(brickname, survey, pixscale=0.1,
# euclid-out2
#radec = (150.588, 1.776),
# euclid-out3
#radec = (150.570, 1.779),
# euclid-out4
#radec = (150.598, 1.767),
#width = 540, height=540,
radec=(ccd.ra, ccd.dec),
width=ccd.width, height=ccd.height,
#forceAll=True,
#writePickles=False,
#stages=['image_coadds'],
#stages=['plot_model'],
bands=['I'],
threads=opt.threads,
wise=False, do_calibs=False,
pixPsf=True, hybridPsf=True,
rex=True,
coadd_bw=True, ceres=False,
blob_image=True,
write_metrics=True,
allbands=allbands,
checkpoint_filename=os.path.join('checkpoints', 'checkpoint-%s.pickle' % brickname),
prereqs_update=prereqs_update,
stagefunc=stagefunc)
#plots=True, plotbase='euclid',
return 0
def package(opt):
print('HACK -- fixing up VISTA forced photometry...')
# Match VISTA images to the others...
ACS = fits_table('euclid-out/forced-r.SNR10-HIQ.fits')
imap = dict([((n,o),i) for i,(n,o) in enumerate(zip(ACS.brickname,
ACS.objid))])
for band in ['Y','J','H']:
fn = 'euclid-out/forced-vista-%s.fits' % band
T = fits_table(fn)
print('Read', len(T), 'from', fn)
T.rename('flux_%s' % band.lower(), 'flux')
T.rename('flux_ivar_%s' % band.lower(), 'flux_ivar')
T.acsindex = np.array([imap.get((n,o), -1)
for n,o in zip(T.brickname, T.objid)])
T.cut(T.acsindex >= 0)
print('Kept', len(T), 'with a match')
ACS.flux = np.zeros(len(ACS), np.float32)
ACS.flux_ivar = np.zeros(len(ACS), np.float32)
ACS.nexp = np.zeros(len(ACS), np.uint8)
ACS.flux[T.acsindex] = T.flux
ACS.flux_ivar[T.acsindex] = T.flux_ivar
ACS.nexp[T.acsindex] = 1
fn = 'euclid-out/forced-vista-%s-2.fits' % band
ACS.writeto(fn)
print('Wrote', fn)
return 0
ACS = read_acs_catalogs()
print('Read', len(ACS), 'ACS catalog entries')
ACS.cut(ACS.brick_primary)
print('Cut to', len(ACS), 'primary')
imap = dict([((n,o),i) for i,(n,o) in enumerate(zip(ACS.brickname,
ACS.objid))])
keepacs = np.zeros(len(ACS), bool)
listfns = glob('euclid/images/megacam/lists/*.lst')
listnames = [os.path.basename(fn).replace('.lst','') for fn in listfns]
explists = [get_exposures_in_list(fn) for fn in listfns]
ccds = list(range(36))
results = []
for expnums in explists:
cflux = np.zeros(len(ACS), np.float32)
cflux_ivar = np.zeros(len(ACS), np.float32)
nexp = np.zeros(len(ACS), np.uint8)
for i,expnum in enumerate(expnums):
for ccd in ccds:
fn = os.path.join('euclid-out', 'forced',
'megacam-%i-ccd%02i.fits' % (expnum, ccd))
print('Reading', fn)
t = fits_table(fn)
fluxes = np.unique([c for c in t.columns()
if c.startswith('flux_') and
not c.startswith('flux_ivar_')])
print('Fluxes:', fluxes)
assert(len(fluxes) == 1)
fluxcol = fluxes[0]
print('Flux column:', fluxcol)
t.iexp = np.zeros(len(t), np.uint8) + i
t.ccd = np.zeros(len(t), np.uint8) + ccd
t.rename(fluxcol, 'flux')
t.rename(fluxcol.replace('flux_', 'flux_ivar_'), 'flux_ivar')
print(len(t), 'from', fn)
# Edge effects...
W,H = 2112, 4644
margin = 30
t.cut((t.x > margin) * (t.x < W-margin) *
(t.y > margin) * (t.y < H-margin))
print('Keeping', len(t), 'not close to the edges')
acsindex = np.array([imap[(n,o)]
for n,o in zip(t.brickname, t.objid)])
# Find sources with existing measurements
# I = np.flatnonzero((cflux_ivar[acsindex] > 0) * (cflux[acsindex] > 0) * (t.flux > 0))
# if len(I) > 100:
# print(len(I), 'sources have previous measurements')
# ai = acsindex[I]
# cmag,cmagerr = NanoMaggies.fluxErrorsToMagErrors(
# cflux[ai] / np.maximum(cflux_ivar[ai], 1e-16), cflux_ivar[ai])
# mag,magerr = NanoMaggies.fluxErrorsToMagErrors(
# t.flux[I], t.flux_ivar[I])
# var = np.maximum(cmagerr, 0.02)**2 + np.maximum(magerr, 0.02)**2
# offset = np.sum((mag - cmag) * 1./var) / np.sum(1./var)
# print('Offset of', offset, 'mag')
# scale = 10.**(offset / 2.5)
# print('Applying scale of', scale, 'to fluxes and ivars')
# t.flux *= scale
# t.flux_ivar /= (scale**2)
cflux [acsindex] += t.flux * t.flux_ivar
cflux_ivar[acsindex] += t.flux_ivar
nexp [acsindex] += 1
keepacs[acsindex] = True
filt = np.unique(t.filter)
assert(len(filt) == 1)
filt = filt[0]
results.append((cflux / np.maximum(cflux_ivar, 1e-16), cflux_ivar, nexp, filt))
ACS.rename('decam_flux', 'acs_flux')
ACS.rename('decam_flux_ivar', 'acs_flux_ivar')
for name,expnums,(cflux, cflux_ivar, nexp, filt) in zip(
listnames, explists, results):
ACS.flux = cflux
ACS.flux_ivar = cflux_ivar
ACS.nexp = nexp
T = ACS[keepacs]
primhdr = fitsio.FITSHDR()
for i,expnum in enumerate(expnums):
primhdr.add_record(dict(name='EXP%i'%i, value=expnum,
comment='MegaCam exposure num'))
primhdr.add_record(dict(name='FILTER', value=filt,
comment='MegaCam filter'))
T.writeto('euclid-out/forced2-%s.fits' % name,
columns=['ra', 'dec', 'flux', 'flux_ivar', 'nexp',
'brickname', 'objid', 'type', 'ra_ivar', 'dec_ivar',
'dchisq', 'ebv', 'acs_flux', 'acs_flux_ivar',
'fracdev', 'fracdev_ivar',
'shapeexp_r', 'shapeexp_r_ivar', 'shapeexp_e1',
'shapeexp_e1_ivar', 'shapeexp_e2', 'shapeexp_e2_ivar',
'shapedev_r', 'shapedev_r_ivar', 'shapedev_e1',
'shapedev_e1_ivar', 'shapedev_e2', 'shapedev_e2_ivar'],
primheader=primhdr)
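# Editor's note: package() above combines per-exposure forced fluxes by
# inverse-variance weighting: accumulate sum(flux*ivar) and sum(ivar), then
# divide. A minimal sketch of the same estimator on plain arrays (names are
# illustrative):
def ivar_weighted_mean_sketch(fluxes, ivars):
    import numpy as np
    cflux = np.zeros(len(fluxes[0]), np.float32)
    cflux_ivar = np.zeros(len(fluxes[0]), np.float32)
    for f, iv in zip(fluxes, ivars):
        cflux += f * iv
        cflux_ivar += iv
    # The epsilon guards sources with no measurements (total ivar of zero),
    # matching the np.maximum(cflux_ivar, 1e-16) used above.
    return cflux / np.maximum(cflux_ivar, 1e-16), cflux_ivar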
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--zeropoints', action='store_true',
help='Read image files to produce CCDs tables.')
parser.add_argument('--download', action='store_true',
help='Download images listed in image list files')
parser.add_argument('--queue-list',
help='List the CCD indices of the images list in the given exposure list filename')
parser.add_argument('--name',
help='Name for forced-phot output products, with --queue-list')
parser.add_argument('--out',
help='Filename for forced-photometry output catalog')
parser.add_argument('--outdir',
default='euclid-out')
parser.add_argument('--survey-dir',
default='euclid')
parser.add_argument('--analyze',
help='Analyze forced photometry results for images listed in given image list file')
parser.add_argument('--package', action='store_true',
help='Average and package up forced photometry results for different Megacam image lists')
parser.add_argument('--expnum', type=int,
help='ACS exposure number to run')
parser.add_argument('--threads', type=int, help='Run multi-threaded')
parser.add_argument('--forced', help='Run forced photometry for given MegaCam CCD index (int) or comma-separated list of indices')
parser.add_argument('--fits', help='Forced photometry: base filename to write *-model.fits and *-image.fits')
parser.add_argument('--ceres', action='store_true', help='Use Ceres?')
parser.add_argument(
'--zoom', type=int, nargs=4,
help='Set target image extent (default "0 3600 0 3600")')
parser.add_argument('--grid', action='store_true',
help='Forced photometry on a grid of subimages?')
parser.add_argument('--derivs', action='store_true',
help='Astrometric Forced photometry')
parser.add_argument('--skip', action='store_true',
help='Skip forced-photometry if output file exists?')
parser.add_argument('--brick', help='Run one brick of CFHTLS reduction')
parser.add_argument('--wise', action='store_true',
help='Run unWISE forced phot')
parser.add_argument('--plot', action='store_true')
opt = parser.parse_args()
Time.add_measurement(MemMeas)
if opt.plot:
from astrometry.util.util import Tan
plt.clf()
for i in range(25,104+1):
fns = glob('euclid-rex/coadd/acs/acsvis-%03i/*-image-I.fits' % i)
print(i, '->', fns)
if len(fns) == 0:
continue
fn = fns[0]
wcs = Tan(fn, 0)
h,w = wcs.shape
xx,yy = [1,1,w,w,1], [1,h,h,1,1]
rr,dd = wcs.pixelxy2radec(xx, yy)
plt.plot(rr, dd)
plt.plot(rr[0], dd[0], 'o')
plt.plot(rr[1], dd[1], 'x')
plt.plot(rr[2], dd[2], 's')
plt.text(np.mean(rr), np.mean(dd), '%03i' % i)
plt.savefig('acs.png')
plt.clf()
for i in range(25,104+1):
fns = glob('euclid-rex/coadd/acs/acsvis-%03i/*-image-I.fits' % i)
if len(fns) == 0:
continue
fn = fns[0]
wcs = Tan(fn, 0)
h,w = wcs.shape
xx,yy = [1,1,w,w,1], [1,h,h,1,1]
rr,dd = wcs.pixelxy2radec(xx, yy)
plt.plot(rr, dd, 'b-', alpha=0.5)
if i == 87:
xx,yy = [200,200,1200,1200,200], [4750,5750,5750,4750,4750]
rr,dd = wcs.pixelxy2radec(xx, yy)
plt.plot(rr, dd, 'r-', alpha=0.5)
fn = 'euclid-rex/images/megacam/962810p.fits'
F = fitsio.FITS(fn)
for i in range(1, len(F)):
hdr = F[i].read_header()
wcs = wcs_pv2sip_hdr(hdr)
h,w = wcs.shape
xx,yy = [1,1,w,w,1], [1,h,h,1,1]
rr,dd = wcs.pixelxy2radec(xx, yy)
plt.plot(rr, dd, 'b-', alpha=0.5)
plt.text(np.mean(rr), np.mean(dd), '%i' % i)
plt.savefig('megacam.png')
return 0
if opt.zeropoints:
make_zeropoints()
return 0
if opt.download:
download()
return 0
if opt.queue_list:
queue_list(opt)
return 0
if opt.analyze:
#analyze(opt)
#analyze2(opt)
#analyze3(opt)
#geometry()
#analyze4(opt)
analyze_vista(opt)
return 0
if opt.package:
package(opt)
return 0
# Run CFHTLS alone
if opt.brick:
survey = CfhtlsSurveyData(output_dir='euclid-out/cfhtls')
survey.bricksize = 3./60.
survey.image_typemap.update({'cfhtls' : CfhtlsImage})
outfn = survey.find_file('tractor', brick=opt.brick, output=True)
print('Checking output file', outfn)
if os.path.exists(outfn):
print('File exists:', outfn)
return 0
global rgbkwargs, rgbkwargs_resid
rgbkwargs .update(scales=rgbscales_cfht)
rgbkwargs_resid.update(scales=rgbscales_cfht)
checkpointfn = 'checkpoints/checkpoint-%s.pickle' % opt.brick
return run_brick(opt.brick, survey, pixscale=0.186, width=1000, height=1000,
bands='ugriz', blacklist=False, wise=False, blob_image=True,
ceres=False,
pixPsf=True, constant_invvar=False, threads=opt.threads,
checkpoint_filename=checkpointfn,
checkpoint_period=300,
#stages=['image_coadds'],
#plots=True
)
if opt.wise:
A = fits_table('/project/projectdirs/cosmo/data/unwise/unwise-coadds/allsky-atlas.fits')
print(len(A), 'WISE atlas tiles')
T = read_acs_catalogs()
print('Read', len(T), 'ACS catalog entries')
T.cut(T.brick_primary)
print('Cut to', len(T), 'primary')
#T.about()
print('RA', T.ra.min(), T.ra.max())
print('Dec', T.dec.min(), T.dec.max())
# London talk...
ra0, ra1, dec0, dec1 = (150.5341329206139, 150.56191295395681, 2.5853063993539656, 2.6130578930632189)
margin = 0.001
T.cut((T.ra > ra0-margin) * (T.ra < ra1+margin) * (T.dec > dec0-margin) * (T.dec < dec1+margin))
print('Cut to', len(T))
# mag = -2.5 * (np.log10(T.decam_flux) - 9)
# mag = mag[np.isfinite(mag)]
# mag = mag[(mag > 10) * (mag < 30)]
# print(len(mag), 'with good mags')
# plt.clf()
# plt.hist(mag, 100)
# plt.xlabel('ACS mag')
# plt.savefig('acs-mags.png')
#
# plt.clf()
# plt.hist(T.decam_flux_ivar, 100)
# plt.xlabel('ACS flux_ivar')
# plt.savefig('acs-flux-iv.png')
T.cut(T.decam_flux_ivar > 0)
print('Cut to', len(T), 'with good flux ivar')
T.cut(T.decam_flux > 0)
print('Cut to', len(T), 'with good flux')
# TEST
#T = T[np.arange(0, len(T), 100)]
from astrometry.libkd.spherematch import match_radec
I,J,d = match_radec(A.ra, A.dec, T.ra, T.dec, 1.2, nearest=True)
print(len(I), 'atlas tiles possibly within range')
A.cut(I)
from wise.unwise import unwise_tile_wcs
from wise.forcedphot import unwise_forcedphot, get_unwise_tractor_image
#unwise_dir = '/project/projectdirs/cosmo/data/unwise/neo1/unwise-coadds/fulldepth/'
unwise_dir = '/global/projecta/projectdirs/cosmo/work/wise/outputs/merge/neo2/fulldepth/'
wcat = read_fits_catalog(T, allbands=['w'], bands=['w'])
roiradec = [ra0,ra1,dec0,dec1]
W = unwise_forcedphot(wcat, A, roiradecbox=roiradec, bands=[1,2],
unwise_dir=unwise_dir, use_ceres=True, save_fits=True)
print('WISE forced phot:', W)
W.writeto('wise.fits')
return
# tim used to limit sizes of huge models
tim = get_unwise_tractor_image(unwise_dir, A.coadd_id[0], 1, bandname='w')
Wall = []
for tile in A:
print('Tile', tile.coadd_id)
wcs = unwise_tile_wcs(tile.ra, tile.dec)
ok, T.x, T.y = wcs.radec2pixelxy(T.ra, T.dec)
H,W = wcs.shape
I = np.flatnonzero((T.x > 1) * (T.x < W) * (T.y > 1) * (T.y < H))
print(len(I), 'sources within image')
step = 256
margin = 10
for iy,yp in enumerate(np.arange(1, H, step)):
for ix,xp in enumerate(np.arange(1, W, step)):
I = np.flatnonzero((T.x > (xp - margin)) * (T.x < (xp + step + margin)) *
(T.y > (yp - margin)) * (T.y < (yp + step + margin)))
print(len(I), 'sources within sub-image')
if len(I) == 0:
continue
outfn = 'euclid-rex/grid/forced-unwise-%s-x%i-y%i.fits' % (tile.coadd_id, ix, iy)
print('Output filename', outfn)
if os.path.exists(outfn):
WW = fits_table(outfn)
print('Read', len(WW), 'from', outfn)
Wall.append(WW)
continue
bands = ['w']
wcat = read_fits_catalog(T[I], allbands=bands, bands=bands)
for src in wcat:
src.brightness = NanoMaggies(**dict([(b, 1.) for b in bands]))
for src in wcat:
from tractor.galaxy import ProfileGalaxy
if isinstance(src, ProfileGalaxy):
px,py = tim.wcs.positionToPixel(src.getPosition())
h = src._getUnitFluxPatchSize(tim, px, py, tim.modelMinval)
MAXHALF = 32
if h > MAXHALF:
print('halfsize', h,'for',src,'-> setting to',MAXHALF)
src.halfsize = MAXHALF
#wcat = [cat[i] for i in I]
tiles = [tile]
ra1,dec1 = wcs.pixelxy2radec(xp-margin, yp-margin)
ra2,dec2 = wcs.pixelxy2radec(xp+step+margin, yp+step+margin)
roiradec = [min(ra1,ra2), max(ra1,ra2),
min(dec1,dec2), max(dec1,dec2)]
#roiradec = [T.ra.min(), T.ra.max(), T.dec.min(), T.dec.max()]
use_ceres = True
WW = []
for band in [1,2]:
Wi = unwise_forcedphot(wcat, tiles, roiradecbox=roiradec, bands=[band],
unwise_dir=unwise_dir, use_ceres=use_ceres)
#print('Got', Wi)
#Wi.about()
WW.append(Wi)
# Merge W1,W2
W1,W2 = WW
WW = W1
WW.add_columns_from(W2)
WW.brickname = T.brickname[I]
WW.objid = T.objid[I]
WW.ra = T.ra[I]
WW.dec = T.dec[I]
WW.primary = ((T.x[I] >= xp) * (T.x[I] < xp+step) *
(T.y[I] >= yp) * (T.y[I] < yp+step))
WW.bx = T.x[I]
WW.by = T.y[I]
# cache
WW.writeto(outfn)
print('Wrote', outfn)
Wall.append(WW)
Wall = merge_tables(Wall)
Wall.writeto('euclid-rex/forced-unwise.fits')
return 0
if opt.expnum is None and opt.forced is None:
print('Need --expnum or --forced')
return -1
survey = get_survey(opt.survey_dir, opt.outdir)
ccds = survey.get_ccds_readonly()
lastcam = None
for i,cam in enumerate(ccds.camera):
if cam != lastcam:
print('Camera', cam, 'at', i)
lastcam = cam
if opt.expnum is not None:
# Run pipeline on a single ACS image.
return reduce_acs_image(opt, survey)
if opt.grid:
assert(opt.out)
i = int(opt.forced, 10)
print('CCD index', i)
ccds = survey.get_ccds_readonly()
ccd = ccds[i]
print('CCD', ccd)
im = survey.get_image_object(ccd)
print('Image', im)
H,W = im.height, im.width
print('Image size', W, 'x', H)
overlap = 50
ngrid = 20
# Integer tile sizes so the zoom bounds below stay valid pixel indices.
xsize = (W + overlap) // ngrid
ysize = (H + overlap) // ngrid
print('Size without overlap:', xsize, ysize)
x0 = np.arange(ngrid) * xsize
x1 = x0 + xsize + overlap
x1[-1] = min(x1[-1], W)
print('X ranges:')
for xx0,xx1 in zip(x0,x1):
print(' ', xx0,xx1)
y0 = np.arange(ngrid) * ysize
y1 = y0 + ysize + overlap
y1[-1] = min(y1[-1], H)
print('Y ranges:')
for yy0,yy1 in zip(y0,y1):
print(' ', yy0,yy1)
print('Opt is a', opt)
fns = []
args = []
for iy,(yy0,yy1) in enumerate(zip(y0,y1)):
for ix,(xx0,xx1) in enumerate(zip(x0,x1)):
outfn = opt.out.replace('.fits', '-x%02i-y%02i.fits' % (ix,iy))
if os.path.exists(outfn):
print('Already exists:', outfn)
continue
newopt = argparse.Namespace()
for k,v in opt.__dict__.items():
setattr(newopt, k, v)
print('Setting option', k, '=', v)
fns.append((outfn, ix, iy))
newopt.out = outfn
newopt.zoom = [xx0, xx1, yy0, yy1]
print('New options:', newopt.__dict__)
args.append((newopt, survey))
xcuts = reduce(np.append, ([0], (x0[1:] + x1[:-1])/2., W))
ycuts = reduce(np.append, ([0], (y0[1:] + y1[:-1])/2., H))
print('X cuts:', xcuts)
print('Y cuts:', ycuts)
if opt.threads is None:
opt.threads = 1
mp = multiproc(opt.threads)
mp.map(_bounce_forced, args)
TT = []
for fn,ix,iy in fns:
T = fits_table(fn)
print('Read', len(T), 'from', fn)
print('X range:', T.x.min(), T.x.max())
xlo,xhi = xcuts[ix], xcuts[ix+1]
print('Cut lo,hi', xlo, xhi)
print('Y range:', T.y.min(), T.y.max())
ylo,yhi = ycuts[iy], ycuts[iy+1]
print('Cut lo,hi', ylo, yhi)
T.cut((T.x >= xlo) * (T.x < xhi) * (T.y >= ylo) * (T.y < yhi))
print('Cut to', len(T), 'sources')
TT.append(T)
TT = merge_tables(TT)
TT.writeto(opt.out)
print('Wrote', opt.out)
return 0
return forced_photometry(opt, survey)
def _bounce_forced(args):
return forced_photometry(*args)
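# Editor's note: the --grid path in main() above splits an axis into ngrid
# overlapping tiles, photometers each tile separately, then deduplicates by
# keeping only sources between the midpoints of adjacent overlaps. A
# one-dimensional sketch of that bookkeeping (the function name is
# illustrative):
def grid_ranges_sketch(W=2112, ngrid=20, overlap=50):
    import numpy as np
    from functools import reduce
    size = (W + overlap) // ngrid
    x0 = np.arange(ngrid) * size
    x1 = np.minimum(x0 + size + overlap, W)
    # Ownership boundaries: midpoints of adjacent tile overlaps.
    xcuts = reduce(np.append, ([0], (x0[1:] + x1[:-1]) / 2., W))
    return x0, x1, xcuts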
def forced_photometry(opt, survey):
# Run forced photometry on a given image or set of Megacam images,
# using the ACS catalog as the source.
ps = PlotSequence('euclid')
#if opt.name is None:
# opt.name = '%i-%s' % (im.expnum, im.ccdname)
if opt.out is not None:
outfn = opt.out
else:
outfn = 'euclid-out/forced/megacam-%s.fits' % opt.name
if opt.skip and os.path.exists(outfn):
print('Output file exists:', outfn)
return 0
t0 = Time()
T = read_acs_catalogs()
print('Read', len(T), 'ACS catalog entries')
T.cut(T.brick_primary)
print('Cut to', len(T), 'primary')
# plt.clf()
# I = T.brick_primary
# plothist(T.ra[I], T.dec[I], 200)
# plt.savefig('acs-sources3.png')
opti = None
forced_kwargs = {}
if opt.ceres:
from tractor.ceres_optimizer import CeresOptimizer
B = 8
opti = CeresOptimizer(BW=B, BH=B)
ccds = survey.get_ccds_readonly()
#I = np.flatnonzero(ccds.camera == 'megacam')
#print(len(I), 'MegaCam CCDs')
I = np.array([int(x,10) for x in opt.forced.split(',')])
print(len(I), 'CCDs: indices', I)
print('Exposure numbers:', ccds.expnum[I])
print('CCD names:', ccds.ccdname[I])
slc = None
if opt.zoom:
x0,x1,y0,y1 = opt.zoom
zw = x1-x0
zh = y1-y0
slc = slice(y0,y1), slice(x0,x1)
tims = []
keep_sources = np.zeros(len(T), bool)
for i in I:
ccd = ccds[i]
im = survey.get_image_object(ccd)
print('CCD', im)
wcs = im.get_wcs()
if opt.zoom:
wcs = wcs.get_subimage(x0, y0, zw, zh)
ok,x,y = wcs.radec2pixelxy(T.ra, T.dec)
x = (x-1).astype(np.float32)
y = (y-1).astype(np.float32)
h,w = wcs.shape
J = np.flatnonzero((x >= 0) * (x < w) *
(y >= 0) * (y < h))
if len(J) == 0:
print('No sources within image.')
continue
print(len(J), 'sources are within this image')
keep_sources[J] = True
tim = im.get_tractor_image(pixPsf=True, slc=slc,
# DECam
splinesky=True
)
print('Tim:', tim)
tims.append(tim)
# plt.clf()
# plt.hist((tim.getImage() * tim.getInvError()).ravel(), range=(-5,5), bins=100,
# histtype='step', color='b')
# plt.xlim(-5,5)
# plt.xlabel('Pixel sigmas')
# plt.title(tim.name)
# ps.savefig()
#
# plt.clf()
# plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',
# vmin=-2.*tim.sig1, vmax=5.*tim.sig1)
# plt.title(tim.name)
# ps.savefig()
#
# ax = plt.axis()
# plt.plot(x[keep_sources], y[keep_sources], 'b.')
# plt.axis(ax)
# ps.savefig()
T.cut(keep_sources)
print('Cut to', len(T), 'sources within at least one CCD')
bands = np.unique([tim.band for tim in tims])
print('Bands:', bands)
# convert to string
bands = ''.join(bands)
cat = read_fits_catalog(T, allbands=bands, bands=bands)
for src in cat:
src.brightness = NanoMaggies(**dict([(b, 1.) for b in bands]))
for src in cat:
# Limit sizes of huge models
from tractor.galaxy import ProfileGalaxy
if isinstance(src, ProfileGalaxy):
tim = tims[0]
px,py = tim.wcs.positionToPixel(src.getPosition())
h = src._getUnitFluxPatchSize(tim, px, py, tim.modelMinval)
MAXHALF = 128
if h > MAXHALF:
print('halfsize', h,'for',src,'-> setting to',MAXHALF)
src.halfsize = MAXHALF
#print('Source:', src)
#print('Params:', src.getParamNames())
src.freezeAllBut('brightness')
#src.getBrightness().freezeAllBut(tim.band)
if opt.derivs:
from legacypipe.forced_photom_decam import SourceDerivatives
assert(len(bands) == 1)
band = bands[0]
realsrcs = []
derivsrcs = []
for src in cat:
realsrcs.append(src)
bright_dra = src.getBrightness().copy()
bright_ddec = src.getBrightness().copy()
bright_dra .setParams(np.zeros(bright_dra .numberOfParams()))
bright_ddec.setParams(np.zeros(bright_ddec.numberOfParams()))
bright_dra .freezeAllBut(tim.band)
bright_ddec.freezeAllBut(tim.band)
dsrc = SourceDerivatives(src, [band], ['pos'],
[bright_dra, bright_ddec])
derivsrcs.append(dsrc)
if hasattr(src, 'halfsize'):
dsrc.halfsize = src.halfsize
# For convenience, put all the real sources at the front of
# the list, so we can pull the IVs off the front of the list.
cat = realsrcs + derivsrcs
tr = Tractor(tims, cat, optimizer=opti)
tr.freezeParam('images')
disable_galaxy_cache()
F = fits_table()
F.brickid = T.brickid
F.brickname = T.brickname
F.objid = T.objid
F.mjd = np.array([tim.primhdr.get('MJD-OBS', 0.)] * len(T)).astype(np.float32)
F.exptime = np.array([tim.primhdr.get('EXPTIME', 0.)] * len(T)).astype(np.float32)
if len(tims) == 1:
tim = tims[0]
F.filter = np.array([tim.band] * len(T))
ok,x,y = tim.sip_wcs.radec2pixelxy(T.ra, T.dec)
F.x = (x-1).astype(np.float32)
F.y = (y-1).astype(np.float32)
t1 = Time()
print('Prior to forced photometry:', t1-t0)
# FIXME -- should we run one band at a time?
# Reset fluxes
nparams = tr.numberOfParams()
tr.setParams(np.zeros(nparams, np.float32))
if opt.derivs:
# Set the source fluxes to 1, not zero, so that the derivatives exist.
pp = np.zeros(nparams, np.float32)
# The first third of the params are the real-source fluxes (each source
# also contributes two derivative fluxes); use integer division.
pp[:nparams // 3] = 1
tr.setParams(pp)
# print('Fitting params:')
# tr.printThawedParams()
R = tr.optimize_forced_photometry(variance=True, fitstats=False, #fitstats=True,
shared_params=False, priors=False,
**forced_kwargs)
# print('Fitted params:')
# tr.printThawedParams()
t2 = Time()
print('Forced photometry:', t2-t1)
if opt.fits:
tim = tims[0]
fn = '%s-image.fits' % opt.fits
print('Writing image to', fn)
fitsio.write(fn, tim.getImage(), clobber=True)
mod = tr.getModelImage(0)
fn = '%s-model.fits' % opt.fits
print('Writing model to', fn)
fitsio.write(fn, mod, clobber=True)
rgbkwargs = dict(mnmx=(-1,100.), arcsinh=1.)
fn = '%s-image.jpg' % opt.fits
print('Writing image to', fn)
rgb = get_rgb([tim.getImage()], [tim.band], scales=rgbscales_cfht,
**rgbkwargs)
for i in range(3):
print('range in plane', i, ':', rgb[:,:,i].min(), rgb[:,:,i].max())
print('RGB', rgb.shape, rgb.dtype)
(plane,scale) = rgbscales_cfht[tim.band]
#rgb = rgb.sum(axis=2)
rgb = rgb[:,:,plane]
print('RGB', rgb.shape)
#imsave_jpeg(rgb, fn)
plt.imsave(fn, rgb, vmin=0, vmax=1, cmap='gray')
fn = '%s-model.jpg' % opt.fits
print('Writing model to', fn)
rgb = get_rgb([mod], [tim.band], scales=rgbscales_cfht,
**rgbkwargs)
print('RGB', rgb.shape)
#rgb = rgb.sum(axis=2)
rgb = rgb[:,:,plane]
print('RGB', rgb.shape)
#imsave_jpeg(rgb, fn)
plt.imsave(fn, rgb, vmin=0, vmax=1, cmap='gray')
ie = tim.getInvError()
noise = np.random.normal(size=ie.shape) * 1./ie
noise[ie == 0] = 0.
noisymod = mod + noise
fn = '%s-model+noise.jpg' % opt.fits
print('Writing model+noise to', fn)
rgb = get_rgb([noisymod], [tim.band], scales=rgbscales_cfht,
**rgbkwargs)
rgb = rgb[:,:,plane]
#rgb = rgb.sum(axis=2)
plt.imsave(fn, rgb, vmin=0, vmax=1, cmap='gray')
fn = '%s-cat.fits' % opt.fits
print('Writing catalog to', fn)
T.writeto(fn)
units = {'exptime': 'sec'}  # per-band 'flux_<band>' (nanomaggy) and 'flux_ivar_<band>' (1/nanomaggy^2) are added below
for band in bands:
if opt.derivs:
cat = realsrcs
N = len(cat)
F.set('flux_%s' % band, np.array([src.getBrightness().getFlux(band)
for src in cat]).astype(np.float32))
units.update({'flux_%s' % band: 'nanomaggy',
'flux_ivar_%s' % band: '1/nanomaggy^2'})
if opt.derivs:
F.flux_dra = np.array([src.getParams()[0] for src in derivsrcs]).astype(np.float32)
F.flux_ddec = np.array([src.getParams()[1] for src in derivsrcs]).astype(np.float32)
F.flux_dra_ivar = R.IV[N::2].astype(np.float32)
F.flux_ddec_ivar = R.IV[N+1::2].astype(np.float32)
# HACK -- use the parameter-setting machinery to set the source
# brightnesses to the *inverse-variance* estimates, then read them
# off...
p0 = tr.getParams()
tr.setParams(R.IV)
for band in bands:
F.set('flux_ivar_%s' % band,
np.array([src.getBrightness().getFlux(band)
for src in cat]).astype(np.float32))
tr.setParams(p0)
hdr = fitsio.FITSHDR()
columns = F.get_columns()
for i,col in enumerate(columns):
if col in units:
hdr.add_record(dict(name='TUNIT%i' % (i+1), value=units[col]))
primhdr = fitsio.FITSHDR()
primhdr.add_record(dict(name='EXPNUM', value=im.expnum,
comment='Exposure number'))
primhdr.add_record(dict(name='CCDNAME', value=im.ccdname,
comment='CCD name'))
primhdr.add_record(dict(name='CAMERA', value=im.camera,
comment='Camera'))
fitsio.write(outfn, None, header=primhdr, clobber=True)
F.writeto(outfn, header=hdr, append=True)
print('Wrote', outfn)
t3 = Time()
print('Wrap-up:', t3-t2)
print('Total:', t3-t0)
return 0
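# Editor's note: a condensed sketch of the output pattern at the end of
# forced_photometry() above: write an empty primary HDU carrying metadata,
# then append the measurement table with per-column TUNITn unit records.
# Assumes fitsio and an astrometry.net fits_table F, as used above.
def write_forced_table_sketch(F, outfn, units):
    import fitsio
    hdr = fitsio.FITSHDR()
    for i, col in enumerate(F.get_columns()):
        if col in units:
            hdr.add_record(dict(name='TUNIT%i' % (i+1), value=units[col]))
    primhdr = fitsio.FITSHDR()
    primhdr.add_record(dict(name='COMMENT', value='forced-photometry output'))
    fitsio.write(outfn, None, header=primhdr, clobber=True)  # primary HDU
    F.writeto(outfn, header=hdr, append=True)                # table HDU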
if __name__ == '__main__':
sys.exit(main())
| legacysurvey/pipeline | py/legacyanalysis/euclid.py | Python | gpl-2.0 | 90291 | ["Galaxy"] | 89a820c8721e570969f82fcb3e31233e1bdbc4956e15dfd54dd6072cee7cc7f0 |
# This file is part of PlexPy.
#
# PlexPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PlexPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.
from urlparse import urlparse
import base64
import json
import cherrypy
from email.mime.text import MIMEText
import email.utils
from httplib import HTTPSConnection
import os
import shlex
import smtplib
import subprocess
from urllib import urlencode
import urllib
import urllib2
from urlparse import parse_qsl
from pynma import pynma
import gntp.notifier
import oauth2 as oauth
import pythontwitter as twitter
import pythonfacebook as facebook
import plexpy
from plexpy import logger, helpers, request
from plexpy.helpers import checked
AGENT_IDS = {"Growl": 0,
"Prowl": 1,
"XBMC": 2,
"Plex": 3,
"NMA": 4,
"Pushalot": 5,
"Pushbullet": 6,
"Pushover": 7,
"OSX Notify": 8,
"Boxcar2": 9,
"Email": 10,
"Twitter": 11,
"IFTTT": 12,
"Telegram": 13,
"Slack": 14,
"Scripts": 15,
"Facebook": 16}
def available_notification_agents():
agents = [{'name': 'Growl',
'id': AGENT_IDS['Growl'],
'config_prefix': 'growl',
'has_config': True,
'state': checked(plexpy.CONFIG.GROWL_ENABLED),
'on_play': plexpy.CONFIG.GROWL_ON_PLAY,
'on_stop': plexpy.CONFIG.GROWL_ON_STOP,
'on_pause': plexpy.CONFIG.GROWL_ON_PAUSE,
'on_resume': plexpy.CONFIG.GROWL_ON_RESUME,
'on_buffer': plexpy.CONFIG.GROWL_ON_BUFFER,
'on_watched': plexpy.CONFIG.GROWL_ON_WATCHED,
'on_created': plexpy.CONFIG.GROWL_ON_CREATED,
'on_extdown': plexpy.CONFIG.GROWL_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.GROWL_ON_INTDOWN,
'on_extup': plexpy.CONFIG.GROWL_ON_EXTUP,
'on_intup': plexpy.CONFIG.GROWL_ON_INTUP
},
{'name': 'Prowl',
'id': AGENT_IDS['Prowl'],
'config_prefix': 'prowl',
'has_config': True,
'state': checked(plexpy.CONFIG.PROWL_ENABLED),
'on_play': plexpy.CONFIG.PROWL_ON_PLAY,
'on_stop': plexpy.CONFIG.PROWL_ON_STOP,
'on_pause': plexpy.CONFIG.PROWL_ON_PAUSE,
'on_resume': plexpy.CONFIG.PROWL_ON_RESUME,
'on_buffer': plexpy.CONFIG.PROWL_ON_BUFFER,
'on_watched': plexpy.CONFIG.PROWL_ON_WATCHED,
'on_created': plexpy.CONFIG.PROWL_ON_CREATED,
'on_extdown': plexpy.CONFIG.PROWL_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.PROWL_ON_INTDOWN,
'on_extup': plexpy.CONFIG.PROWL_ON_EXTUP,
'on_intup': plexpy.CONFIG.PROWL_ON_INTUP
},
{'name': 'XBMC',
'id': AGENT_IDS['XBMC'],
'config_prefix': 'xbmc',
'has_config': True,
'state': checked(plexpy.CONFIG.XBMC_ENABLED),
'on_play': plexpy.CONFIG.XBMC_ON_PLAY,
'on_stop': plexpy.CONFIG.XBMC_ON_STOP,
'on_pause': plexpy.CONFIG.XBMC_ON_PAUSE,
'on_resume': plexpy.CONFIG.XBMC_ON_RESUME,
'on_buffer': plexpy.CONFIG.XBMC_ON_BUFFER,
'on_watched': plexpy.CONFIG.XBMC_ON_WATCHED,
'on_created': plexpy.CONFIG.XBMC_ON_CREATED,
'on_extdown': plexpy.CONFIG.XBMC_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.XBMC_ON_INTDOWN,
'on_extup': plexpy.CONFIG.XBMC_ON_EXTUP,
'on_intup': plexpy.CONFIG.XBMC_ON_INTUP
},
{'name': 'Plex',
'id': AGENT_IDS['Plex'],
'config_prefix': 'plex',
'has_config': True,
'state': checked(plexpy.CONFIG.PLEX_ENABLED),
'on_play': plexpy.CONFIG.PLEX_ON_PLAY,
'on_stop': plexpy.CONFIG.PLEX_ON_STOP,
'on_pause': plexpy.CONFIG.PLEX_ON_PAUSE,
'on_resume': plexpy.CONFIG.PLEX_ON_RESUME,
'on_buffer': plexpy.CONFIG.PLEX_ON_BUFFER,
'on_watched': plexpy.CONFIG.PLEX_ON_WATCHED,
'on_created': plexpy.CONFIG.PLEX_ON_CREATED,
'on_extdown': plexpy.CONFIG.PLEX_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.PLEX_ON_INTDOWN,
'on_extup': plexpy.CONFIG.PLEX_ON_EXTUP,
'on_intup': plexpy.CONFIG.PLEX_ON_INTUP
},
{'name': 'NotifyMyAndroid',
'id': AGENT_IDS['NMA'],
'config_prefix': 'nma',
'has_config': True,
'state': checked(plexpy.CONFIG.NMA_ENABLED),
'on_play': plexpy.CONFIG.NMA_ON_PLAY,
'on_stop': plexpy.CONFIG.NMA_ON_STOP,
'on_pause': plexpy.CONFIG.NMA_ON_PAUSE,
'on_resume': plexpy.CONFIG.NMA_ON_RESUME,
'on_buffer': plexpy.CONFIG.NMA_ON_BUFFER,
'on_watched': plexpy.CONFIG.NMA_ON_WATCHED,
'on_created': plexpy.CONFIG.NMA_ON_CREATED,
'on_extdown': plexpy.CONFIG.NMA_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.NMA_ON_INTDOWN,
'on_extup': plexpy.CONFIG.NMA_ON_EXTUP,
'on_intup': plexpy.CONFIG.NMA_ON_INTUP
},
{'name': 'Pushalot',
'id': AGENT_IDS['Pushalot'],
'config_prefix': 'pushalot',
'has_config': True,
'state': checked(plexpy.CONFIG.PUSHALOT_ENABLED),
'on_play': plexpy.CONFIG.PUSHALOT_ON_PLAY,
'on_stop': plexpy.CONFIG.PUSHALOT_ON_STOP,
'on_pause': plexpy.CONFIG.PUSHALOT_ON_PAUSE,
'on_resume': plexpy.CONFIG.PUSHALOT_ON_RESUME,
'on_buffer': plexpy.CONFIG.PUSHALOT_ON_BUFFER,
'on_watched': plexpy.CONFIG.PUSHALOT_ON_WATCHED,
'on_created': plexpy.CONFIG.PUSHALOT_ON_CREATED,
'on_extdown': plexpy.CONFIG.PUSHALOT_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.PUSHALOT_ON_INTDOWN,
'on_extup': plexpy.CONFIG.PUSHALOT_ON_EXTUP,
'on_intup': plexpy.CONFIG.PUSHALOT_ON_INTUP
},
{'name': 'Pushbullet',
'id': AGENT_IDS['Pushbullet'],
'config_prefix': 'pushbullet',
'has_config': True,
'state': checked(plexpy.CONFIG.PUSHBULLET_ENABLED),
'on_play': plexpy.CONFIG.PUSHBULLET_ON_PLAY,
'on_stop': plexpy.CONFIG.PUSHBULLET_ON_STOP,
'on_pause': plexpy.CONFIG.PUSHBULLET_ON_PAUSE,
'on_resume': plexpy.CONFIG.PUSHBULLET_ON_RESUME,
'on_buffer': plexpy.CONFIG.PUSHBULLET_ON_BUFFER,
'on_watched': plexpy.CONFIG.PUSHBULLET_ON_WATCHED,
'on_created': plexpy.CONFIG.PUSHBULLET_ON_CREATED,
'on_extdown': plexpy.CONFIG.PUSHBULLET_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.PUSHBULLET_ON_INTDOWN,
'on_extup': plexpy.CONFIG.PUSHBULLET_ON_EXTUP,
'on_intup': plexpy.CONFIG.PUSHBULLET_ON_INTUP
},
{'name': 'Pushover',
'id': AGENT_IDS['Pushover'],
'config_prefix': 'pushover',
'has_config': True,
'state': checked(plexpy.CONFIG.PUSHOVER_ENABLED),
'on_play': plexpy.CONFIG.PUSHOVER_ON_PLAY,
'on_stop': plexpy.CONFIG.PUSHOVER_ON_STOP,
'on_pause': plexpy.CONFIG.PUSHOVER_ON_PAUSE,
'on_resume': plexpy.CONFIG.PUSHOVER_ON_RESUME,
'on_buffer': plexpy.CONFIG.PUSHOVER_ON_BUFFER,
'on_watched': plexpy.CONFIG.PUSHOVER_ON_WATCHED,
'on_created': plexpy.CONFIG.PUSHOVER_ON_CREATED,
'on_extdown': plexpy.CONFIG.PUSHOVER_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.PUSHOVER_ON_INTDOWN,
'on_extup': plexpy.CONFIG.PUSHOVER_ON_EXTUP,
'on_intup': plexpy.CONFIG.PUSHOVER_ON_INTUP
},
{'name': 'Boxcar2',
'id': AGENT_IDS['Boxcar2'],
'config_prefix': 'boxcar',
'has_config': True,
'state': checked(plexpy.CONFIG.BOXCAR_ENABLED),
'on_play': plexpy.CONFIG.BOXCAR_ON_PLAY,
'on_stop': plexpy.CONFIG.BOXCAR_ON_STOP,
'on_pause': plexpy.CONFIG.BOXCAR_ON_PAUSE,
'on_resume': plexpy.CONFIG.BOXCAR_ON_RESUME,
'on_buffer': plexpy.CONFIG.BOXCAR_ON_BUFFER,
'on_watched': plexpy.CONFIG.BOXCAR_ON_WATCHED,
'on_created': plexpy.CONFIG.BOXCAR_ON_CREATED,
'on_extdown': plexpy.CONFIG.BOXCAR_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.BOXCAR_ON_INTDOWN,
'on_extup': plexpy.CONFIG.BOXCAR_ON_EXTUP,
'on_intup': plexpy.CONFIG.BOXCAR_ON_INTUP
},
{'name': 'E-mail',
'id': AGENT_IDS['Email'],
'config_prefix': 'email',
'has_config': True,
'state': checked(plexpy.CONFIG.EMAIL_ENABLED),
'on_play': plexpy.CONFIG.EMAIL_ON_PLAY,
'on_stop': plexpy.CONFIG.EMAIL_ON_STOP,
'on_pause': plexpy.CONFIG.EMAIL_ON_PAUSE,
'on_resume': plexpy.CONFIG.EMAIL_ON_RESUME,
'on_buffer': plexpy.CONFIG.EMAIL_ON_BUFFER,
'on_watched': plexpy.CONFIG.EMAIL_ON_WATCHED,
'on_created': plexpy.CONFIG.EMAIL_ON_CREATED,
'on_extdown': plexpy.CONFIG.EMAIL_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.EMAIL_ON_INTDOWN,
'on_extup': plexpy.CONFIG.EMAIL_ON_EXTUP,
'on_intup': plexpy.CONFIG.EMAIL_ON_INTUP
},
{'name': 'Twitter',
'id': AGENT_IDS['Twitter'],
'config_prefix': 'twitter',
'has_config': True,
'state': checked(plexpy.CONFIG.TWITTER_ENABLED),
'on_play': plexpy.CONFIG.TWITTER_ON_PLAY,
'on_stop': plexpy.CONFIG.TWITTER_ON_STOP,
'on_pause': plexpy.CONFIG.TWITTER_ON_PAUSE,
'on_resume': plexpy.CONFIG.TWITTER_ON_RESUME,
'on_buffer': plexpy.CONFIG.TWITTER_ON_BUFFER,
'on_watched': plexpy.CONFIG.TWITTER_ON_WATCHED,
'on_created': plexpy.CONFIG.TWITTER_ON_CREATED,
'on_extdown': plexpy.CONFIG.TWITTER_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.TWITTER_ON_INTDOWN,
'on_extup': plexpy.CONFIG.TWITTER_ON_EXTUP,
'on_intup': plexpy.CONFIG.TWITTER_ON_INTUP
},
{'name': 'IFTTT',
'id': AGENT_IDS['IFTTT'],
'config_prefix': 'ifttt',
'has_config': True,
'state': checked(plexpy.CONFIG.IFTTT_ENABLED),
'on_play': plexpy.CONFIG.IFTTT_ON_PLAY,
'on_stop': plexpy.CONFIG.IFTTT_ON_STOP,
'on_pause': plexpy.CONFIG.IFTTT_ON_PAUSE,
'on_resume': plexpy.CONFIG.IFTTT_ON_RESUME,
'on_buffer': plexpy.CONFIG.IFTTT_ON_BUFFER,
'on_watched': plexpy.CONFIG.IFTTT_ON_WATCHED,
'on_created': plexpy.CONFIG.IFTTT_ON_CREATED,
'on_extdown': plexpy.CONFIG.IFTTT_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.IFTTT_ON_INTDOWN,
'on_extup': plexpy.CONFIG.IFTTT_ON_EXTUP,
'on_intup': plexpy.CONFIG.IFTTT_ON_INTUP
},
{'name': 'Telegram',
'id': AGENT_IDS['Telegram'],
'config_prefix': 'telegram',
'has_config': True,
'state': checked(plexpy.CONFIG.TELEGRAM_ENABLED),
'on_play': plexpy.CONFIG.TELEGRAM_ON_PLAY,
'on_stop': plexpy.CONFIG.TELEGRAM_ON_STOP,
'on_pause': plexpy.CONFIG.TELEGRAM_ON_PAUSE,
'on_resume': plexpy.CONFIG.TELEGRAM_ON_RESUME,
'on_buffer': plexpy.CONFIG.TELEGRAM_ON_BUFFER,
'on_watched': plexpy.CONFIG.TELEGRAM_ON_WATCHED,
'on_created': plexpy.CONFIG.TELEGRAM_ON_CREATED,
'on_extdown': plexpy.CONFIG.TELEGRAM_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.TELEGRAM_ON_INTDOWN,
'on_extup': plexpy.CONFIG.TELEGRAM_ON_EXTUP,
'on_intup': plexpy.CONFIG.TELEGRAM_ON_INTUP
},
{'name': 'Slack',
'id': AGENT_IDS['Slack'],
'config_prefix': 'slack',
'has_config': True,
'state': checked(plexpy.CONFIG.SLACK_ENABLED),
'on_play': plexpy.CONFIG.SLACK_ON_PLAY,
'on_stop': plexpy.CONFIG.SLACK_ON_STOP,
'on_resume': plexpy.CONFIG.SLACK_ON_RESUME,
'on_pause': plexpy.CONFIG.SLACK_ON_PAUSE,
'on_buffer': plexpy.CONFIG.SLACK_ON_BUFFER,
'on_watched': plexpy.CONFIG.SLACK_ON_WATCHED,
'on_created': plexpy.CONFIG.SLACK_ON_CREATED,
'on_extdown': plexpy.CONFIG.SLACK_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.SLACK_ON_INTDOWN,
'on_extup': plexpy.CONFIG.SLACK_ON_EXTUP,
'on_intup': plexpy.CONFIG.SLACK_ON_INTUP
},
{'name': 'Scripts',
'id': AGENT_IDS['Scripts'],
'config_prefix': 'scripts',
'has_config': True,
'state': checked(plexpy.CONFIG.SCRIPTS_ENABLED),
'on_play': plexpy.CONFIG.SCRIPTS_ON_PLAY,
'on_stop': plexpy.CONFIG.SCRIPTS_ON_STOP,
'on_pause': plexpy.CONFIG.SCRIPTS_ON_PAUSE,
'on_resume': plexpy.CONFIG.SCRIPTS_ON_RESUME,
'on_buffer': plexpy.CONFIG.SCRIPTS_ON_BUFFER,
'on_watched': plexpy.CONFIG.SCRIPTS_ON_WATCHED,
'on_created': plexpy.CONFIG.SCRIPTS_ON_CREATED,
'on_extdown': plexpy.CONFIG.SCRIPTS_ON_EXTDOWN,
'on_extup': plexpy.CONFIG.SCRIPTS_ON_EXTUP,
'on_intdown': plexpy.CONFIG.SCRIPTS_ON_INTDOWN,
'on_intup': plexpy.CONFIG.SCRIPTS_ON_INTUP
},
{'name': 'Facebook',
'id': AGENT_IDS['Facebook'],
'config_prefix': 'facebook',
'has_config': True,
'state': checked(plexpy.CONFIG.FACEBOOK_ENABLED),
'on_play': plexpy.CONFIG.FACEBOOK_ON_PLAY,
'on_stop': plexpy.CONFIG.FACEBOOK_ON_STOP,
'on_pause': plexpy.CONFIG.FACEBOOK_ON_PAUSE,
'on_resume': plexpy.CONFIG.FACEBOOK_ON_RESUME,
'on_buffer': plexpy.CONFIG.FACEBOOK_ON_BUFFER,
'on_watched': plexpy.CONFIG.FACEBOOK_ON_WATCHED,
'on_created': plexpy.CONFIG.FACEBOOK_ON_CREATED,
'on_extdown': plexpy.CONFIG.FACEBOOK_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.FACEBOOK_ON_INTDOWN,
'on_extup': plexpy.CONFIG.FACEBOOK_ON_EXTUP,
'on_intup': plexpy.CONFIG.FACEBOOK_ON_INTUP
}
]
# OSX Notifications should only be visible if it can be used
osx_notify = OSX_NOTIFY()
if osx_notify.validate():
agents.append({'name': 'OSX Notify',
'id': AGENT_IDS['OSX Notify'],
'config_prefix': 'osx_notify',
'has_config': True,
'state': checked(plexpy.CONFIG.OSX_NOTIFY_ENABLED),
'on_play': plexpy.CONFIG.OSX_NOTIFY_ON_PLAY,
'on_stop': plexpy.CONFIG.OSX_NOTIFY_ON_STOP,
'on_pause': plexpy.CONFIG.OSX_NOTIFY_ON_PAUSE,
'on_resume': plexpy.CONFIG.OSX_NOTIFY_ON_RESUME,
'on_buffer': plexpy.CONFIG.OSX_NOTIFY_ON_BUFFER,
'on_watched': plexpy.CONFIG.OSX_NOTIFY_ON_WATCHED,
'on_created': plexpy.CONFIG.OSX_NOTIFY_ON_CREATED,
'on_extdown': plexpy.CONFIG.OSX_NOTIFY_ON_EXTDOWN,
'on_intdown': plexpy.CONFIG.OSX_NOTIFY_ON_INTDOWN,
'on_extup': plexpy.CONFIG.OSX_NOTIFY_ON_EXTUP,
'on_intup': plexpy.CONFIG.OSX_NOTIFY_ON_INTUP
})
return agents
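# Editor's note: every agent dict above repeats the same eleven on_* keys,
# differing only in the config prefix. A sketch of building one entry from
# its prefix via getattr on plexpy.CONFIG; the helper is illustrative and is
# not used by the code below.
def _agent_entry(name, agent_id, prefix):
    entry = {'name': name,
             'id': agent_id,
             'config_prefix': prefix,
             'has_config': True,
             'state': checked(getattr(plexpy.CONFIG, '%s_ENABLED' % prefix.upper()))}
    for ev in ('play', 'stop', 'pause', 'resume', 'buffer', 'watched',
               'created', 'extdown', 'intdown', 'extup', 'intup'):
        entry['on_%s' % ev] = getattr(plexpy.CONFIG,
                                      '%s_ON_%s' % (prefix.upper(), ev.upper()))
    return entry
# e.g. _agent_entry('Growl', AGENT_IDS['Growl'], 'growl')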
def get_notification_agent_config(config_id):
if str(config_id).isdigit():
config_id = int(config_id)
if config_id == 0:
growl = GROWL()
return growl.return_config_options()
elif config_id == 1:
prowl = PROWL()
return prowl.return_config_options()
elif config_id == 2:
xbmc = XBMC()
return xbmc.return_config_options()
elif config_id == 3:
plex = Plex()
return plex.return_config_options()
elif config_id == 4:
nma = NMA()
return nma.return_config_options()
elif config_id == 5:
pushalot = PUSHALOT()
return pushalot.return_config_options()
elif config_id == 6:
pushbullet = PUSHBULLET()
return pushbullet.return_config_options()
elif config_id == 7:
pushover = PUSHOVER()
return pushover.return_config_options()
elif config_id == 8:
osx_notify = OSX_NOTIFY()
return osx_notify.return_config_options()
elif config_id == 9:
boxcar = BOXCAR()
return boxcar.return_config_options()
elif config_id == 10:
email = Email()
return email.return_config_options()
elif config_id == 11:
tweet = TwitterNotifier()
return tweet.return_config_options()
elif config_id == 12:
iftttClient = IFTTT()
return iftttClient.return_config_options()
elif config_id == 13:
telegramClient = TELEGRAM()
return telegramClient.return_config_options()
elif config_id == 14:
slackClient = SLACK()
return slackClient.return_config_options()
elif config_id == 15:
script = Scripts()
return script.return_config_options()
elif config_id == 16:
facebook = FacebookNotifier()
return facebook.return_config_options()
else:
return []
else:
return []
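# Editor's note: get_notification_agent_config() above and send_notification()
# below both dispatch on config_id through long if/elif chains. A sketch of
# an equivalent table-driven lookup (illustrative, unused elsewhere). The
# table is built lazily so the classes defined further down in this module
# resolve at call time, not import time.
def _agent_class_for(config_id):
    table = {AGENT_IDS['Growl']: GROWL,
             AGENT_IDS['Prowl']: PROWL,
             AGENT_IDS['XBMC']: XBMC,
             AGENT_IDS['Plex']: Plex,
             AGENT_IDS['Email']: Email}
    return table.get(int(config_id))
# e.g. cls = _agent_class_for(config_id)
#      return cls().return_config_options() if cls else []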
def send_notification(config_id, subject, body, **kwargs):
if str(config_id).isdigit():
config_id = int(config_id)
if config_id == 0:
growl = GROWL()
growl.notify(message=body, event=subject)
elif config_id == 1:
prowl = PROWL()
prowl.notify(message=body, event=subject)
elif config_id == 2:
xbmc = XBMC()
xbmc.notify(subject=subject, message=body)
elif config_id == 3:
plex = Plex()
plex.notify(subject=subject, message=body)
elif config_id == 4:
nma = NMA()
nma.notify(subject=subject, message=body)
elif config_id == 5:
pushalot = PUSHALOT()
pushalot.notify(message=body, event=subject)
elif config_id == 6:
pushbullet = PUSHBULLET()
pushbullet.notify(message=body, subject=subject)
elif config_id == 7:
pushover = PUSHOVER()
pushover.notify(message=body, event=subject)
elif config_id == 8:
osx_notify = OSX_NOTIFY()
osx_notify.notify(title=subject, text=body)
elif config_id == 9:
boxcar = BOXCAR()
boxcar.notify(title=subject, message=body)
elif config_id == 10:
email = Email()
email.notify(subject=subject, message=body)
elif config_id == 11:
tweet = TwitterNotifier()
tweet.notify(subject=subject, message=body)
elif config_id == 12:
iftttClient = IFTTT()
iftttClient.notify(subject=subject, message=body)
elif config_id == 13:
telegramClient = TELEGRAM()
telegramClient.notify(message=body, event=subject)
elif config_id == 14:
slackClient = SLACK()
slackClient.notify(message=body, event=subject)
elif config_id == 15:
scripts = Scripts()
scripts.notify(message=body, subject=subject, **kwargs)
elif config_id == 16:
facebook = FacebookNotifier()
facebook.notify(subject=subject, message=body)
else:
logger.debug(u"PlexPy Notifiers :: Unknown agent id received.")
else:
logger.debug(u"PlexPy Notifiers :: Notification requested but no agent id received.")
class GROWL(object):
"""
Growl notifications, for OS X.
"""
def __init__(self):
self.enabled = plexpy.CONFIG.GROWL_ENABLED
self.host = plexpy.CONFIG.GROWL_HOST
self.password = plexpy.CONFIG.GROWL_PASSWORD
def conf(self, options):
return cherrypy.config['config'].get('Growl', options)
def notify(self, message, event):
if not message or not event:
return
# Split host and port; an empty host falls back to localhost on the
# default GNTP port (23053). (Previously the second check could override
# the localhost fallback with an empty hostname.)
if self.host == "":
host, port = "localhost", 23053
elif ":" in self.host:
host, port = self.host.split(':', 1)
port = int(port)
else:
host, port = self.host, 23053
# If password is empty, assume none
if self.password == "":
password = None
else:
password = self.password
# Register notification
growl = gntp.notifier.GrowlNotifier(
applicationName='PlexPy',
notifications=['New Event'],
defaultNotifications=['New Event'],
hostname=host,
port=port,
password=password
)
try:
growl.register()
except gntp.notifier.errors.NetworkError:
logger.warn(u"PlexPy Notifiers :: Growl notification failed: network error")
return
except gntp.notifier.errors.AuthError:
logger.warn(u"PlexPy Notifiers :: Growl notification failed: authentication error")
return
# Fix message
message = message.encode(plexpy.SYS_ENCODING, "replace")
# Send it, including an image
image_file = os.path.join(str(plexpy.PROG_DIR),
"data/interfaces/default/images/favicon.png")
with open(image_file, 'rb') as f:
image = f.read()
try:
growl.notify(
noteType='New Event',
title=event,
description=message,
icon=image
)
logger.info(u"PlexPy Notifiers :: Growl notification sent.")
except gntp.notifier.errors.NetworkError:
logger.warn(u"PlexPy Notifiers :: Growl notification failed: network error")
return
def updateLibrary(self):
# For uniformity reasons not removed
return
def test(self, host, password):
self.enabled = True
self.host = host
self.password = password
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
def return_config_options(self):
config_option = [{'label': 'Growl Host',
'value': self.host,
'name': 'growl_host',
'description': 'Your Growl hostname.',
'input_type': 'text'
},
{'label': 'Growl Password',
'value': self.password,
'name': 'growl_password',
'description': 'Your Growl password.',
'input_type': 'password'
}
]
return config_option
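# Editor's note: a small, testable version of the host/port parsing fixed in
# GROWL.notify() above (the helper name is illustrative). 23053 is the
# default GNTP port.
def _split_growl_host(host, default_port=23053):
    if not host:
        return 'localhost', default_port
    if ':' in host:
        h, p = host.split(':', 1)
        return h, int(p)
    return host, default_port
# e.g. _split_growl_host('') -> ('localhost', 23053)
#      _split_growl_host('myhost:1234') -> ('myhost', 1234)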
class PROWL(object):
"""
Prowl notifications.
"""
def __init__(self):
self.enabled = plexpy.CONFIG.PROWL_ENABLED
self.keys = plexpy.CONFIG.PROWL_KEYS
self.priority = plexpy.CONFIG.PROWL_PRIORITY
def conf(self, options):
return cherrypy.config['config'].get('Prowl', options)
def notify(self, message, event):
if not message or not event:
return
http_handler = HTTPSConnection("api.prowlapp.com")
data = {'apikey': plexpy.CONFIG.PROWL_KEYS,
'application': 'PlexPy',
'event': event.encode("utf-8"),
'description': message.encode("utf-8"),
'priority': plexpy.CONFIG.PROWL_PRIORITY}
http_handler.request("POST",
"/publicapi/add",
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
logger.info(u"PlexPy Notifiers :: Prowl notification sent.")
return True
elif request_status == 401:
logger.warn(u"PlexPy Notifiers :: Prowl notification failed: %s" % response.reason)
return False
else:
logger.warn(u"PlexPy Notifiers :: Prowl notification failed.")
return False
def updateLibrary(self):
# For uniformity reasons not removed
return
def test(self, keys, priority):
self.enabled = True
self.keys = keys
self.priority = priority
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
def return_config_options(self):
config_option = [{'label': 'Prowl API Key',
'value': self.keys,
'name': 'prowl_keys',
'description': 'Your Prowl API key.',
'input_type': 'text'
},
{'label': 'Priority',
'value': self.priority,
'name': 'prowl_priority',
'description': 'Set the priority.',
'input_type': 'select',
'select_options': {-2: -2, -1: -1, 0: 0, 1: 1, 2: 2}
}
]
return config_option
class XBMC(object):
"""
XBMC notifications
"""
def __init__(self):
self.hosts = plexpy.CONFIG.XBMC_HOST
self.username = plexpy.CONFIG.XBMC_USERNAME
self.password = plexpy.CONFIG.XBMC_PASSWORD
def _sendhttp(self, host, command):
url_command = urllib.urlencode(command)
url = host + '/xbmcCmds/xbmcHttp/?' + url_command
if self.password:
return request.request_content(url, auth=(self.username, self.password))
else:
return request.request_content(url)
def _sendjson(self, host, method, params={}):
data = [{'id': 0, 'jsonrpc': '2.0', 'method': method, 'params': params}]
headers = {'Content-Type': 'application/json'}
url = host + '/jsonrpc'
if self.password:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers, auth=(self.username, self.password))
else:
response = request.request_json(url, method="post", data=json.dumps(data), headers=headers)
if response:
return response[0]['result']
def notify(self, subject=None, message=None):
hosts = [x.strip() for x in self.hosts.split(',')]
header = subject
message = message
time = "3000" # in ms
for host in hosts:
logger.info(u"PlexPy Notifiers :: Sending notification command to XMBC @ " + host)
try:
version = self._sendjson(host, 'Application.GetProperties', {'properties': ['version']})['version']['major']
if version < 12: # Eden
notification = header + "," + message + "," + time
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
else: # Frodo
params = {'title': header, 'message': message, 'displaytime': int(time)}
request = self._sendjson(host, 'GUI.ShowNotification', params)
if not request:
raise Exception
else:
logger.info(u"PlexPy Notifiers :: XBMC notification sent.")
except Exception:
logger.warn(u"PlexPy Notifiers :: XBMC notification filed.")
def return_config_options(self):
config_option = [{'label': 'XBMC Host:Port',
'value': self.hosts,
'name': 'xbmc_host',
'description': 'Host running XBMC (e.g. http://localhost:8080). Separate multiple hosts with commas (,).',
'input_type': 'text'
},
{'label': 'XBMC Username',
'value': self.username,
'name': 'xbmc_username',
'description': 'Your XBMC username.',
'input_type': 'text'
},
{'label': 'XBMC Password',
'value': self.password,
'name': 'xbmc_password',
'description': 'Your XBMC password.',
'input_type': 'password'
}
]
return config_option
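# Editor's note: for reference, XBMC._sendjson() above posts a JSON-RPC 2.0
# batch of one to <host>/jsonrpc, so a Frodo-era notification payload looks
# like this (values illustrative):
# [{"id": 0, "jsonrpc": "2.0", "method": "GUI.ShowNotification",
#   "params": {"title": "<subject>", "message": "<body>", "displaytime": 3000}}]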
class Plex(object):
def __init__(self):
self.client_hosts = plexpy.CONFIG.PLEX_CLIENT_HOST
self.username = plexpy.CONFIG.PLEX_USERNAME
self.password = plexpy.CONFIG.PLEX_PASSWORD
def _sendhttp(self, host, command):
username = self.username
password = self.password
url_command = urllib.urlencode(command)
url = host + '/xbmcCmds/xbmcHttp/?' + url_command
req = urllib2.Request(url)
if password:
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % base64string)
# logger.info(u"PlexPy Notifiers :: Plex url: %s" % url)
try:
handle = urllib2.urlopen(req)
except Exception as e:
logger.error(u"PlexPy Notifiers :: Error opening Plex url: %s" % e)
return
response = handle.read().decode(plexpy.SYS_ENCODING)
return response
def notify(self, subject=None, message=None):
hosts = [x.strip() for x in self.client_hosts.split(',')]
header = subject
message = message
time = "3000" # in ms
for host in hosts:
logger.info(u"PlexPy Notifiers :: Sending notification command to Plex Media Server @ " + host)
try:
notification = header + "," + message + "," + time
notifycommand = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + notification + ')'}
request = self._sendhttp(host, notifycommand)
if not request:
raise Exception
else:
logger.info(u"PlexPy Notifiers :: Plex notification sent.")
except:
logger.warn(u"PlexPy Notifiers :: Plex notification failed.")
def return_config_options(self):
config_option = [{'label': 'Plex Client Host:Port',
'value': self.client_hosts,
'name': 'plex_client_host',
                          'description': 'Host running Plex Client (e.g. http://192.168.1.100:3000).',
'input_type': 'text'
},
{'label': 'Plex Username',
'value': self.username,
'name': 'plex_username',
'description': 'Username of your Plex client API (blank for none).',
'input_type': 'text'
},
{'label': 'Plex Password',
'value': self.password,
'name': 'plex_password',
'description': 'Password of your Plex client API (blank for none).',
'input_type': 'password'
}
]
return config_option
class NMA(object):
def __init__(self):
self.api = plexpy.CONFIG.NMA_APIKEY
self.nma_priority = plexpy.CONFIG.NMA_PRIORITY
def notify(self, subject=None, message=None):
if not subject or not message:
return
title = 'PlexPy'
api = plexpy.CONFIG.NMA_APIKEY
nma_priority = plexpy.CONFIG.NMA_PRIORITY
# logger.debug(u"NMA title: " + title)
# logger.debug(u"NMA API: " + api)
# logger.debug(u"NMA Priority: " + str(nma_priority))
event = subject
# logger.debug(u"NMA event: " + event)
# logger.debug(u"NMA message: " + message)
batch = False
p = pynma.PyNMA()
keys = api.split(',')
p.addkey(keys)
if len(keys) > 1:
batch = True
response = p.push(title, event, message, priority=nma_priority, batch_mode=batch)
if not response[api][u'code'] == u'200':
logger.warn(u"PlexPy Notifiers :: NotifyMyAndroid notification failed.")
return False
else:
logger.info(u"PlexPy Notifiers :: NotifyMyAndroid notification sent.")
return True
def return_config_options(self):
config_option = [{'label': 'NotifyMyAndroid API Key',
'value': plexpy.CONFIG.NMA_APIKEY,
'name': 'nma_apikey',
'description': 'Your NotifyMyAndroid API key. Separate multiple api keys with commas.',
'input_type': 'text'
},
{'label': 'Priority',
'value': plexpy.CONFIG.NMA_PRIORITY,
'name': 'nma_priority',
'description': 'Set the priority.',
'input_type': 'select',
'select_options': {-2: -2, -1: -1, 0: 0, 1: 1, 2: 2}
}
]
return config_option
class PUSHBULLET(object):
def __init__(self):
self.apikey = plexpy.CONFIG.PUSHBULLET_APIKEY
self.deviceid = plexpy.CONFIG.PUSHBULLET_DEVICEID
self.channel_tag = plexpy.CONFIG.PUSHBULLET_CHANNEL_TAG
def conf(self, options):
return cherrypy.config['config'].get('PUSHBULLET', options)
def notify(self, message, subject):
if not message or not subject:
return
http_handler = HTTPSConnection("api.pushbullet.com")
data = {'type': "note",
'title': subject.encode("utf-8"),
'body': message.encode("utf-8")}
# Can only send to a device or channel, not both.
if self.deviceid:
data['device_iden'] = self.deviceid
elif self.channel_tag:
data['channel_tag'] = self.channel_tag
http_handler.request("POST",
"/v2/pushes",
headers={'Content-type': "application/json",
'Authorization': 'Basic %s' % base64.b64encode(plexpy.CONFIG.PUSHBULLET_APIKEY + ":")},
body=json.dumps(data))
response = http_handler.getresponse()
request_status = response.status
# logger.debug(u"PushBullet response status: %r" % request_status)
# logger.debug(u"PushBullet response headers: %r" % response.getheaders())
# logger.debug(u"PushBullet response body: %r" % response.read())
if request_status == 200:
logger.info(u"PlexPy Notifiers :: PushBullet notification sent.")
return True
elif request_status >= 400 and request_status < 500:
logger.warn(u"PlexPy Notifiers :: PushBullet notification failed: %s" % response.reason)
return False
else:
logger.warn(u"PlexPy Notifiers :: PushBullet notification failed.")
return False
def test(self, apikey, deviceid):
self.enabled = True
self.apikey = apikey
self.deviceid = deviceid
self.notify('Main Screen Activate', 'Test Message')
def get_devices(self):
if plexpy.CONFIG.PUSHBULLET_APIKEY:
http_handler = HTTPSConnection("api.pushbullet.com")
http_handler.request("GET", "/v2/devices",
headers={'Content-type': "application/json",
'Authorization': 'Basic %s' % base64.b64encode(plexpy.CONFIG.PUSHBULLET_APIKEY + ":")})
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
data = json.loads(response.read())
devices = data.get('devices', [])
devices = {d['iden']: d['nickname'] for d in devices if d['active']}
devices.update({'': ''})
return devices
elif request_status >= 400 and request_status < 500:
logger.warn(u"PlexPy Notifiers :: Unable to retrieve Pushbullet devices list: %s" % response.reason)
return {'': ''}
else:
logger.warn(u"PlexPy Notifiers :: Unable to retrieve Pushbullet devices list.")
return {'': ''}
else:
return {'': ''}
def return_config_options(self):
config_option = [{'label': 'Pushbullet API Key',
'value': self.apikey,
'name': 'pushbullet_apikey',
'description': 'Your Pushbullet API key.',
'input_type': 'text'
},
{'label': 'Device',
'value': self.deviceid,
'name': 'pushbullet_deviceid',
'description': 'Set your Pushbullet device. If set, will override channel tag. ' \
'Leave blank to notify on all devices.',
'input_type': 'select',
'select_options': self.get_devices()
},
{'label': 'Channel',
'value': self.channel_tag,
'name': 'pushbullet_channel_tag',
'description': 'A channel tag (optional).',
'input_type': 'text'
}
]
return config_option
class PUSHALOT(object):
def __init__(self):
self.api_key = plexpy.CONFIG.PUSHALOT_APIKEY
def notify(self, message, event):
if not message or not event:
return
pushalot_authorizationtoken = plexpy.CONFIG.PUSHALOT_APIKEY
# logger.debug(u"Pushalot event: " + event)
# logger.debug(u"Pushalot message: " + message)
# logger.debug(u"Pushalot api: " + pushalot_authorizationtoken)
http_handler = HTTPSConnection("pushalot.com")
data = {'AuthorizationToken': pushalot_authorizationtoken,
'Title': event.encode('utf-8'),
'Body': message.encode("utf-8")}
http_handler.request("POST",
"/api/sendmessage",
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
# logger.debug(u"Pushalot response status: %r" % request_status)
# logger.debug(u"Pushalot response headers: %r" % response.getheaders())
# logger.debug(u"Pushalot response body: %r" % response.read())
if request_status == 200:
logger.info(u"PlexPy Notifiers :: Pushalot notification sent.")
return True
elif request_status == 410:
logger.warn(u"PlexPy Notifiers :: Pushalot notification failed: %s" % response.reason)
return False
else:
logger.warn(u"PlexPy Notifiers :: Pushalot notification failed.")
return False
def return_config_options(self):
config_option = [{'label': 'Pushalot API Key',
'value': plexpy.CONFIG.PUSHALOT_APIKEY,
'name': 'pushalot_apikey',
'description': 'Your Pushalot API key.',
'input_type': 'text'
}
]
return config_option
class PUSHOVER(object):
def __init__(self):
self.enabled = plexpy.CONFIG.PUSHOVER_ENABLED
self.application_token = plexpy.CONFIG.PUSHOVER_APITOKEN
self.keys = plexpy.CONFIG.PUSHOVER_KEYS
self.priority = plexpy.CONFIG.PUSHOVER_PRIORITY
self.sound = plexpy.CONFIG.PUSHOVER_SOUND
def conf(self, options):
return cherrypy.config['config'].get('Pushover', options)
def notify(self, message, event):
if not message or not event:
return
http_handler = HTTPSConnection("api.pushover.net")
data = {'token': self.application_token,
'user': plexpy.CONFIG.PUSHOVER_KEYS,
'title': event.encode("utf-8"),
'message': message.encode("utf-8"),
'sound': plexpy.CONFIG.PUSHOVER_SOUND,
'priority': plexpy.CONFIG.PUSHOVER_PRIORITY}
http_handler.request("POST",
"/1/messages.json",
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
# logger.debug(u"Pushover response status: %r" % request_status)
# logger.debug(u"Pushover response headers: %r" % response.getheaders())
# logger.debug(u"Pushover response body: %r" % response.read())
if request_status == 200:
logger.info(u"PlexPy Notifiers :: Pushover notification sent.")
return True
elif request_status >= 400 and request_status < 500:
logger.warn(u"PlexPy Notifiers :: Pushover notification failed: %s" % response.reason)
return False
else:
logger.warn(u"PlexPy Notifiers :: Pushover notification failed.")
return False
def updateLibrary(self):
# For uniformity reasons not removed
return
def test(self, keys, priority, sound):
self.enabled = True
self.keys = keys
self.priority = priority
self.sound = sound
self.notify('Main Screen Activate', 'Test Message')
def get_sounds(self):
if plexpy.CONFIG.PUSHOVER_APITOKEN:
http_handler = HTTPSConnection("api.pushover.net")
http_handler.request("GET", "/1/sounds.json?token=" + self.application_token)
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
data = json.loads(response.read())
sounds = data.get('sounds', {})
sounds.update({'': ''})
return sounds
elif request_status >= 400 and request_status < 500:
logger.warn(u"PlexPy Notifiers :: Unable to retrieve Pushover notification sounds list: %s" % response.reason)
return {'': ''}
else:
logger.warn(u"PlexPy Notifiers :: Unable to retrieve Pushover notification sounds list.")
return {'': ''}
else:
return {'': ''}
def return_config_options(self):
config_option = [{'label': 'Pushover API Token',
'value': plexpy.CONFIG.PUSHOVER_APITOKEN,
'name': 'pushover_apitoken',
'description': 'Your Pushover API token.',
'input_type': 'text'
},
{'label': 'Pushover User or Group Key',
'value': self.keys,
'name': 'pushover_keys',
'description': 'Your Pushover user or group key.',
'input_type': 'text'
},
{'label': 'Priority',
'value': self.priority,
'name': 'pushover_priority',
'description': 'Set the priority.',
'input_type': 'select',
'select_options': {-2: -2, -1: -1, 0: 0, 1: 1, 2: 2}
},
{'label': 'Sound',
'value': self.sound,
'name': 'pushover_sound',
'description': 'Set the notification sound. Leave blank for the default sound.',
'input_type': 'select',
'select_options': self.get_sounds()
}
]
return config_option
class TwitterNotifier(object):
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def __init__(self):
self.access_token = plexpy.CONFIG.TWITTER_ACCESS_TOKEN
self.access_token_secret = plexpy.CONFIG.TWITTER_ACCESS_TOKEN_SECRET
self.consumer_key = plexpy.CONFIG.TWITTER_CONSUMER_KEY
self.consumer_secret = plexpy.CONFIG.TWITTER_CONSUMER_SECRET
def notify(self, subject, message):
if not subject or not message:
return
else:
self._send_tweet(subject + ': ' + message)
def test_notify(self):
return self._send_tweet("This is a test notification from PlexPy at " + helpers.now())
def _get_authorization(self):
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.info("PlexPy Notifiers :: Requesting temp token from Twitter")
resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
logger.warn("PlexPy Notifiers :: Invalid respond from Twitter requesting temp token: %s" % resp['status'])
else:
request_token = dict(parse_qsl(content))
plexpy.CONFIG.TWITTER_ACCESS_TOKEN = request_token['oauth_token']
plexpy.CONFIG.TWITTER_ACCESS_TOKEN_SECRET = request_token['oauth_token_secret']
return self.AUTHORIZATION_URL + "?oauth_token=" + request_token['oauth_token']
def _get_credentials(self, key):
request_token = {}
request_token['oauth_token'] = plexpy.CONFIG.TWITTER_ACCESS_TOKEN
request_token['oauth_token_secret'] = plexpy.CONFIG.TWITTER_ACCESS_TOKEN_SECRET
request_token['oauth_callback_confirmed'] = 'true'
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(key)
# logger.debug(u"Generating and signing request for an access token using key " + key)
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
# logger.debug(u"oauth_consumer: " + str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
# logger.debug(u"oauth_client: " + str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
# logger.debug(u"resp, content: " + str(resp) + ',' + str(content))
access_token = dict(parse_qsl(content))
# logger.debug(u"access_token: " + str(access_token))
# logger.debug(u"resp[status] = " + str(resp['status']))
if resp['status'] != '200':
logger.error(u"PlexPy Notifiers :: The request for a Twitter token did not succeed: " + str(resp['status']), logger.ERROR)
return False
else:
# logger.info(u"PlexPy Notifiers :: Your Twitter Access Token key: %s" % access_token['oauth_token'])
# logger.info(u"PlexPy Notifiers :: Access Token secret: %s" % access_token['oauth_token_secret'])
plexpy.CONFIG.TWITTER_ACCESS_TOKEN = access_token['oauth_token']
plexpy.CONFIG.TWITTER_ACCESS_TOKEN_SECRET = access_token['oauth_token_secret']
plexpy.CONFIG.write()
return True
def _send_tweet(self, message=None):
consumer_key = self.consumer_key
consumer_secret = self.consumer_secret
access_token = self.access_token
access_token_secret = self.access_token_secret
# logger.info(u"PlexPy Notifiers :: Sending tweet: " + message)
api = twitter.Api(consumer_key, consumer_secret, access_token, access_token_secret)
try:
api.PostUpdate(message)
logger.info(u"PlexPy Notifiers :: Twitter notification sent.")
except Exception as e:
logger.warn(u"PlexPy Notifiers :: Twitter notification failed: %s" % e)
return False
return True
def return_config_options(self):
config_option = [{'label': 'Instructions',
'description': 'Step 1: Visit <a href="https://apps.twitter.com/" target="_blank"> \
                                          Twitter Apps</a> to <strong>Create New App</strong>. A valid "Website" is not required.<br>\
Step 2: Go to <strong>Keys and Access Tokens</strong> and click \
<strong>Create my access token</strong>.<br>\
Step 3: Fill in the <strong>Consumer Key</strong>, <strong>Consumer Secret</strong>, \
<strong>Access Token</strong>, and <strong>Access Token Secret</strong> below.',
'input_type': 'help'
},
{'label': 'Twitter Consumer Key',
'value': self.consumer_key,
'name': 'twitter_consumer_key',
'description': 'Your Twitter consumer key.',
'input_type': 'text'
},
{'label': 'Twitter Consumer Secret',
'value': self.consumer_secret,
'name': 'twitter_consumer_secret',
'description': 'Your Twitter consumer secret.',
'input_type': 'text'
},
{'label': 'Twitter Access Token',
'value': self.access_token,
'name': 'twitter_access_token',
'description': 'Your Twitter access token.',
'input_type': 'text'
},
{'label': 'Twitter Access Token Secret',
'value': self.access_token_secret,
'name': 'twitter_access_token_secret',
'description': 'Your Twitter access token secret.',
'input_type': 'text'
}
]
return config_option
class OSX_NOTIFY(object):
def __init__(self):
try:
self.objc = __import__("objc")
self.AppKit = __import__("AppKit")
        except ImportError:
            # logger.error(u"PlexPy Notifiers :: Cannot load OSX Notifications agent.")
            pass
def validate(self):
try:
self.objc = __import__("objc")
self.AppKit = __import__("AppKit")
return True
        except ImportError:
return False
def swizzle(self, cls, SEL, func):
old_IMP = cls.instanceMethodForSelector_(SEL)
def wrapper(self, *args, **kwargs):
return func(self, old_IMP, *args, **kwargs)
new_IMP = self.objc.selector(wrapper, selector=old_IMP.selector,
signature=old_IMP.signature)
self.objc.classAddMethod(cls, SEL, new_IMP)
def notify(self, title, subtitle=None, text=None, sound=True, image=None):
try:
self.swizzle(self.objc.lookUpClass('NSBundle'),
b'bundleIdentifier',
self.swizzled_bundleIdentifier)
NSUserNotification = self.objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = self.objc.lookUpClass('NSUserNotificationCenter')
NSAutoreleasePool = self.objc.lookUpClass('NSAutoreleasePool')
if not NSUserNotification or not NSUserNotificationCenter:
return False
pool = NSAutoreleasePool.alloc().init()
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
if subtitle:
notification.setSubtitle_(subtitle)
if text:
notification.setInformativeText_(text)
if sound:
notification.setSoundName_("NSUserNotificationDefaultSoundName")
if image:
source_img = self.AppKit.NSImage.alloc().initByReferencingFile_(image)
notification.setContentImage_(source_img)
# notification.set_identityImage_(source_img)
notification.setHasActionButton_(False)
notification_center = NSUserNotificationCenter.defaultUserNotificationCenter()
notification_center.deliverNotification_(notification)
logger.info(u"PlexPy Notifiers :: OSX Notify notification sent.")
del pool
return True
except Exception as e:
logger.warn(u"PlexPy Notifiers :: OSX notification failed: %s" % e)
return False
def swizzled_bundleIdentifier(self, original, swizzled):
return 'ade.plexpy.osxnotify'
def return_config_options(self):
config_option = [{'label': 'Register Notify App',
'value': plexpy.CONFIG.OSX_NOTIFY_APP,
'name': 'osx_notify_app',
'description': 'Enter the path/application name to be registered with the '
'Notification Center, default is /Applications/PlexPy.',
'input_type': 'text'
}
]
return config_option
class BOXCAR(object):
def __init__(self):
self.url = 'https://new.boxcar.io/api/notifications'
self.token = plexpy.CONFIG.BOXCAR_TOKEN
self.sound = plexpy.CONFIG.BOXCAR_SOUND
def notify(self, title, message):
if not title or not message:
return
try:
data = urllib.urlencode({
'user_credentials': plexpy.CONFIG.BOXCAR_TOKEN,
'notification[title]': title.encode('utf-8'),
'notification[long_message]': message.encode('utf-8'),
'notification[sound]': plexpy.CONFIG.BOXCAR_SOUND
})
req = urllib2.Request(self.url)
handle = urllib2.urlopen(req, data)
handle.close()
logger.info(u"PlexPy Notifiers :: Boxcar2 notification sent.")
return True
except urllib2.URLError as e:
logger.warn(u"PlexPy Notifiers :: Boxcar2 notification failed: %s" % e)
return False
def get_sounds(self):
sounds = {'': '',
'beep-crisp': 'Beep (Crisp)',
'beep-soft': 'Beep (Soft)',
'bell-modern': 'Bell (Modern)',
'bell-one-tone': 'Bell (One Tone)',
'bell-simple': 'Bell (Simple)',
'bell-triple': 'Bell (Triple)',
'bird-1': 'Bird (1)',
'bird-2': 'Bird (2)',
'boing': 'Boing',
'cash': 'Cash',
'clanging': 'Clanging',
'detonator-charge': 'Detonator Charge',
'digital-alarm': 'Digital Alarm',
'done': 'Done',
'echo': 'Echo',
'flourish': 'Flourish',
'harp': 'Harp',
'light': 'Light',
'magic-chime':'Magic Chime',
'magic-coin': 'Magic Coin',
'no-sound': 'No Sound',
'notifier-1': 'Notifier (1)',
'notifier-2': 'Notifier (2)',
'notifier-3': 'Notifier (3)',
'orchestral-long': 'Orchestral (Long)',
'orchestral-short': 'Orchestral (Short)',
'score': 'Score',
'success': 'Success',
'up': 'Up'}
return sounds
def return_config_options(self):
config_option = [{'label': 'Boxcar Access Token',
'value': plexpy.CONFIG.BOXCAR_TOKEN,
'name': 'boxcar_token',
'description': 'Your Boxcar access token.',
'input_type': 'text'
},
{'label': 'Sound',
'value': self.sound,
'name': 'boxcar_sound',
'description': 'Set the notification sound. Leave blank for the default sound.',
'input_type': 'select',
'select_options': self.get_sounds()
}
]
return config_option
class Email(object):
def __init__(self):
pass
def notify(self, subject, message):
if not subject or not message:
return
message = MIMEText(message, 'plain', "utf-8")
message['Subject'] = subject
message['From'] = email.utils.formataddr((plexpy.CONFIG.EMAIL_FROM_NAME, plexpy.CONFIG.EMAIL_FROM))
message['To'] = plexpy.CONFIG.EMAIL_TO
message['CC'] = plexpy.CONFIG.EMAIL_CC
recipients = [x.strip() for x in plexpy.CONFIG.EMAIL_TO.split(';')] \
+ [x.strip() for x in plexpy.CONFIG.EMAIL_CC.split(';')] \
+ [x.strip() for x in plexpy.CONFIG.EMAIL_BCC.split(';')]
recipients = filter(None, recipients)
try:
mailserver = smtplib.SMTP(plexpy.CONFIG.EMAIL_SMTP_SERVER, plexpy.CONFIG.EMAIL_SMTP_PORT)
if (plexpy.CONFIG.EMAIL_TLS):
mailserver.starttls()
mailserver.ehlo()
if plexpy.CONFIG.EMAIL_SMTP_USER:
mailserver.login(plexpy.CONFIG.EMAIL_SMTP_USER, plexpy.CONFIG.EMAIL_SMTP_PASSWORD)
mailserver.sendmail(plexpy.CONFIG.EMAIL_FROM, recipients, message.as_string())
mailserver.quit()
logger.info(u"PlexPy Notifiers :: Email notification sent.")
return True
except Exception as e:
logger.warn(u"PlexPy Notifiers :: Email notification failed: %s" % e)
return False
def return_config_options(self):
config_option = [{'label': 'From Name',
'value': plexpy.CONFIG.EMAIL_FROM_NAME,
'name': 'email_from_name',
'description': 'The name of the sender.',
'input_type': 'text'
},
{'label': 'From',
'value': plexpy.CONFIG.EMAIL_FROM,
'name': 'email_from',
'description': 'The email address of the sender.',
'input_type': 'text'
},
{'label': 'To',
'value': plexpy.CONFIG.EMAIL_TO,
'name': 'email_to',
'description': 'The email address(es) of the recipients, separated by semicolons (;).',
'input_type': 'text'
},
{'label': 'CC',
'value': plexpy.CONFIG.EMAIL_CC,
'name': 'email_cc',
'description': 'The email address(es) to CC, separated by semicolons (;).',
'input_type': 'text'
},
{'label': 'BCC',
'value': plexpy.CONFIG.EMAIL_BCC,
'name': 'email_bcc',
'description': 'The email address(es) to BCC, separated by semicolons (;).',
'input_type': 'text'
},
{'label': 'SMTP Server',
'value': plexpy.CONFIG.EMAIL_SMTP_SERVER,
'name': 'email_smtp_server',
'description': 'Host for the SMTP server.',
'input_type': 'text'
},
{'label': 'SMTP Port',
'value': plexpy.CONFIG.EMAIL_SMTP_PORT,
'name': 'email_smtp_port',
'description': 'Port for the SMTP server.',
'input_type': 'number'
},
{'label': 'SMTP User',
'value': plexpy.CONFIG.EMAIL_SMTP_USER,
'name': 'email_smtp_user',
'description': 'User for the SMTP server.',
'input_type': 'text'
},
{'label': 'SMTP Password',
'value': plexpy.CONFIG.EMAIL_SMTP_PASSWORD,
'name': 'email_smtp_password',
'description': 'Password for the SMTP server.',
'input_type': 'password'
},
{'label': 'TLS',
'value': plexpy.CONFIG.EMAIL_TLS,
'name': 'email_tls',
'description': 'Does the server use encryption.',
'input_type': 'checkbox'
}
]
return config_option
class IFTTT(object):
def __init__(self):
self.apikey = plexpy.CONFIG.IFTTT_KEY
self.event = plexpy.CONFIG.IFTTT_EVENT
def notify(self, message, subject):
if not message or not subject:
return
http_handler = HTTPSConnection("maker.ifttt.com")
data = {'value1': subject.encode("utf-8"),
'value2': message.encode("utf-8")}
# logger.debug(u"Ifttt SENDING: %s" % json.dumps(data))
http_handler.request("POST",
"/trigger/%s/with/key/%s" % (self.event, self.apikey),
headers={'Content-type': "application/json"},
body=json.dumps(data))
response = http_handler.getresponse()
request_status = response.status
# logger.debug(u"Ifttt response status: %r" % request_status)
# logger.debug(u"Ifttt response headers: %r" % response.getheaders())
# logger.debug(u"Ifttt response body: %r" % response.read())
if request_status == 200:
logger.info(u"PlexPy Notifiers :: Ifttt notification sent.")
return True
elif request_status >= 400 and request_status < 500:
logger.warn(u"PlexPy Notifiers :: Ifttt notification failed: %s" % response.reason)
return False
else:
logger.warn(u"PlexPy Notifiers :: Ifttt notification failed.")
return False
def test(self):
return self.notify('PlexPy', 'Test Message')
def return_config_options(self):
config_option = [{'label': 'Ifttt Maker Channel Key',
'value': self.apikey,
'name': 'ifttt_key',
'description': 'Your Ifttt key. You can get a key from <a href="https://ifttt.com/maker" target="_blank">here</a>.',
'input_type': 'text'
},
{'label': 'Ifttt Event',
'value': self.event,
'name': 'ifttt_event',
'description': 'The Ifttt maker event to fire. The notification subject and body will be sent'
' as value1 and value2 respectively.',
'input_type': 'text'
}
]
return config_option
class TELEGRAM(object):
def __init__(self):
self.enabled = plexpy.CONFIG.TELEGRAM_ENABLED
self.bot_token = plexpy.CONFIG.TELEGRAM_BOT_TOKEN
self.chat_id = plexpy.CONFIG.TELEGRAM_CHAT_ID
def conf(self, options):
return cherrypy.config['config'].get('Telegram', options)
def notify(self, message, event):
if not message or not event:
return
http_handler = HTTPSConnection("api.telegram.org")
data = {'chat_id': self.chat_id,
'text': event.encode('utf-8') + ': ' + message.encode("utf-8")}
http_handler.request("POST",
"/bot%s/%s" % (self.bot_token, "sendMessage"),
headers={'Content-type': "application/x-www-form-urlencoded"},
body=urlencode(data))
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
logger.info(u"PlexPy Notifiers :: Telegram notification sent.")
return True
elif request_status >= 400 and request_status < 500:
logger.warn(u"PlexPy Notifiers :: Telegram notification failed: %s" % response.reason)
return False
else:
logger.warn(u"PlexPy Notifiers :: Telegram notification failed.")
return False
def updateLibrary(self):
# For uniformity reasons not removed
return
def test(self, bot_token, chat_id):
self.enabled = True
self.bot_token = bot_token
self.chat_id = chat_id
self.notify('Main Screen Activate', 'Test Message')
def return_config_options(self):
config_option = [{'label': 'Telegram Bot Token',
'value': self.bot_token,
'name': 'telegram_bot_token',
'description': 'Your Telegram bot token. Contact <a href="http://telegram.me/BotFather" target="_blank">@BotFather</a> on Telegram to get one.',
'input_type': 'text'
},
{'label': 'Telegram Chat ID, Group ID, or Channel Username',
'value': self.chat_id,
'name': 'telegram_chat_id',
'description': 'Your Telegram Chat ID, Group ID, or @channelusername. Contact <a href="http://telegram.me/myidbot" target="_blank">@myidbot</a> on Telegram to get an ID.',
'input_type': 'text'
}
]
return config_option
class SLACK(object):
"""
Slack Notifications
"""
def __init__(self):
self.enabled = plexpy.CONFIG.SLACK_ENABLED
self.slack_hook = plexpy.CONFIG.SLACK_HOOK
self.channel = plexpy.CONFIG.SLACK_CHANNEL
self.username = plexpy.CONFIG.SLACK_USERNAME
self.icon_emoji = plexpy.CONFIG.SLACK_ICON_EMOJI
def conf(self, options):
return cherrypy.config['config'].get('Slack', options)
def notify(self, message, event):
if not message or not event:
return
http_handler = HTTPSConnection("hooks.slack.com")
data = {'text': event.encode('utf-8') + ': ' + message.encode("utf-8")}
if self.channel != '': data['channel'] = self.channel
if self.username != '': data['username'] = self.username
if self.icon_emoji != '':
if urlparse(self.icon_emoji).scheme == '':
data['icon_emoji'] = self.icon_emoji
else:
                data['icon_url'] = self.icon_emoji
url = urlparse(self.slack_hook).path
http_handler.request("POST",
url,
headers={'Content-type': "application/x-www-form-urlencoded"},
body=json.dumps(data))
response = http_handler.getresponse()
request_status = response.status
if request_status == 200:
logger.info(u"PlexPy Notifiers :: Slack notification sent.")
return True
elif request_status >= 400 and request_status < 500:
logger.warn(u"PlexPy Notifiers :: Slack notification failed: %s" % response.reason)
return False
else:
logger.warn(u"PlexPy Notifiers :: Slack notification failed.")
return False
def updateLibrary(self):
#For uniformity reasons not removed
return
def test(self):
self.enabled = True
return self.notify('Main Screen Activate', 'Test Message')
def return_config_options(self):
config_option = [{'label': 'Slack Hook',
'value': self.slack_hook,
'name': 'slack_hook',
'description': 'Your Slack incoming webhook.',
'input_type': 'text'
},
{'label': 'Slack Channel',
'value': self.channel,
'name': 'slack_channel',
'description': 'Your Slack channel name (begin with \'#\'). Leave blank for webhook integration default.',
'input_type': 'text'
},
{'label': 'Slack Username',
'value': self.username,
'name': 'slack_username',
'description': 'The Slack username which will be shown. Leave blank for webhook integration default.',
'input_type': 'text'
},
{'label': 'Slack Icon',
'value': self.icon_emoji,
'description': 'The icon you wish to show, use Slack emoji or image url. Leave blank for webhook integration default.',
'name': 'slack_icon_emoji',
'input_type': 'text'
}
]
return config_option
class Scripts(object):
def __init__(self, **kwargs):
self.script_exts = ('.bat', '.cmd', '.exe', '.php', '.pl', '.py', '.pyw', '.rb', '.sh')
def conf(self, options):
return cherrypy.config['config'].get('Scripts', options)
def updateLibrary(self):
# For uniformity reasons not removed
return
def test(self, subject, message, *args, **kwargs):
self.notify(subject, message, *args, **kwargs)
return
def list_scripts(self):
scriptdir = plexpy.CONFIG.SCRIPTS_FOLDER
scripts = {'': ''}
if scriptdir and not os.path.exists(scriptdir):
return scripts
for root, dirs, files in os.walk(scriptdir):
for f in files:
name, ext = os.path.splitext(f)
if ext in self.script_exts:
rfp = os.path.join(os.path.relpath(root, scriptdir), f)
fp = os.path.join(root, f)
scripts[fp] = rfp
return scripts
def notify(self, subject='', message='', notify_action='', script_args=[], *args, **kwargs):
"""
Args:
subject(string, optional): Head text,
message(string, optional): Body text,
notify_action(string): 'play'
script_args(list): ["python2", '-p', '-zomg']
"""
logger.debug(u"PlexPy Notifiers :: Trying to run notify script, action: %s, arguments: %s" %
(notify_action if notify_action else None, script_args if script_args else None))
if not plexpy.CONFIG.SCRIPTS_FOLDER:
return
# Make sure we use the correct script..
if notify_action == 'play':
script = plexpy.CONFIG.SCRIPTS_ON_PLAY_SCRIPT
elif notify_action == 'stop':
script = plexpy.CONFIG.SCRIPTS_ON_STOP_SCRIPT
elif notify_action == 'pause':
script = plexpy.CONFIG.SCRIPTS_ON_PAUSE_SCRIPT
elif notify_action == 'resume':
script = plexpy.CONFIG.SCRIPTS_ON_RESUME_SCRIPT
elif notify_action == 'buffer':
script = plexpy.CONFIG.SCRIPTS_ON_BUFFER_SCRIPT
elif notify_action == 'extdown':
script = plexpy.CONFIG.SCRIPTS_ON_EXTDOWN_SCRIPT
elif notify_action == 'extup':
script = plexpy.CONFIG.SCRIPTS_ON_EXTUP_SCRIPT
elif notify_action == 'intdown':
script = plexpy.CONFIG.SCRIPTS_ON_INTDOWN_SCRIPT
elif notify_action == 'intup':
script = plexpy.CONFIG.SCRIPTS_ON_INTUP_SCRIPT
elif notify_action == 'created':
script = plexpy.CONFIG.SCRIPTS_ON_CREATED_SCRIPT
elif notify_action == 'watched':
script = plexpy.CONFIG.SCRIPTS_ON_WATCHED_SCRIPT
else:
# For manual scripts
script = kwargs.get('script', '')
# Don't try to run the script if the action does not have one
if notify_action and not script:
logger.debug(u"PlexPy Notifiers :: No script selected for action %s, exiting..." % notify_action)
return
elif not script:
logger.debug(u"PlexPy Notifiers :: No script selected, exiting...")
return
name, ext = os.path.splitext(script)
if ext == '.py':
prefix = 'python'
elif ext == '.pyw':
prefix = 'pythonw'
elif ext == '.php':
prefix = 'php'
elif ext == '.pl':
prefix = 'perl'
elif ext == '.rb':
prefix = 'ruby'
else:
prefix = ''
if os.name == 'nt':
script = script.encode(plexpy.SYS_ENCODING, 'ignore')
if prefix:
script = [prefix, script]
else:
script = [script]
# For manual notifications
if script_args and isinstance(script_args, basestring):
            # attempt to format it for the user
script_args = shlex.split(script_args)
# Windows handles unicode very badly.
# https://bugs.python.org/issue19264
if script_args and os.name == 'nt':
script_args = [s.encode(plexpy.SYS_ENCODING, 'ignore') for s in script_args]
        # Allow the user to override the interpreter prefix on systems with unusual setups
if prefix and script_args:
if script_args[0] in ['python2', 'python', 'pythonw', 'php', 'ruby', 'perl']:
script[0] = script_args[0]
del script_args[0]
script.extend(script_args)
logger.debug(u"PlexPy Notifiers :: Full script is: %s" % script)
try:
p = subprocess.Popen(script, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=plexpy.CONFIG.SCRIPTS_FOLDER)
out, error = p.communicate()
status = p.returncode
if out and status:
out = out.strip()
logger.debug(u"PlexPy Notifiers :: Script returned: %s" % out)
if error:
error = error.strip()
logger.error(u"PlexPy Notifiers :: Script error: %s" % error)
else:
logger.info(u"PlexPy Notifiers :: Script notification sent.")
except OSError as e:
logger.error(u"PlexPy Notifiers :: Failed to run script: %s" % e)
def return_config_options(self):
config_option = [{'label': 'Warning',
'description': '<strong>Script notifications are currently experimental!</strong><br><br>\
Supported file types: ' + ', '.join(self.script_exts),
'input_type': 'help'
},
{'label': 'Script folder',
'value': plexpy.CONFIG.SCRIPTS_FOLDER,
'name': 'scripts_folder',
'description': 'Add your script folder.',
'input_type': 'text',
},
{'label': 'Playback Start',
'value': plexpy.CONFIG.SCRIPTS_ON_PLAY_SCRIPT,
'name': 'scripts_on_play_script',
'description': 'Choose the script for on play.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Playback Stop',
'value': plexpy.CONFIG.SCRIPTS_ON_STOP_SCRIPT,
'name': 'scripts_on_stop_script',
'description': 'Choose the script for on stop.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Playback Pause',
'value': plexpy.CONFIG.SCRIPTS_ON_PAUSE_SCRIPT,
'name': 'scripts_on_pause_script',
'description': 'Choose the script for on pause.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Playback Resume',
'value': plexpy.CONFIG.SCRIPTS_ON_RESUME_SCRIPT,
'name': 'scripts_on_resume_script',
'description': 'Choose the script for on resume.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Watched',
'value': plexpy.CONFIG.SCRIPTS_ON_WATCHED_SCRIPT,
'name': 'scripts_on_watched_script',
'description': 'Choose the script for on watched.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Buffer Warnings',
'value': plexpy.CONFIG.SCRIPTS_ON_BUFFER_SCRIPT,
'name': 'scripts_on_buffer_script',
'description': 'Choose the script for buffer warnings.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Recently Added',
'value': plexpy.CONFIG.SCRIPTS_ON_CREATED_SCRIPT,
'name': 'scripts_on_created_script',
'description': 'Choose the script for recently added.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Plex Remote Access Down',
'value': plexpy.CONFIG.SCRIPTS_ON_EXTDOWN_SCRIPT,
'name': 'scripts_on_extdown_script',
'description': 'Choose the script for Plex remote access down.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Plex Server Down',
'value': plexpy.CONFIG.SCRIPTS_ON_INTDOWN_SCRIPT,
'name': 'scripts_on_intdown_script',
'description': 'Choose the script for Plex server down.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Plex Remote Access Back Up',
'value': plexpy.CONFIG.SCRIPTS_ON_EXTUP_SCRIPT,
'name': 'scripts_on_extup_script',
'description': 'Choose the script for Plex remote access back up.',
'input_type': 'select',
'select_options': self.list_scripts()
},
{'label': 'Plex Server Back Up',
'value': plexpy.CONFIG.SCRIPTS_ON_INTUP_SCRIPT,
'name': 'scripts_on_intup_script',
'description': 'Choose the script for Plex server back up.',
'input_type': 'select',
'select_options': self.list_scripts()
}
]
return config_option
class FacebookNotifier(object):
def __init__(self):
self.redirect_uri = plexpy.CONFIG.FACEBOOK_REDIRECT_URI
self.app_id = plexpy.CONFIG.FACEBOOK_APP_ID
self.app_secret = plexpy.CONFIG.FACEBOOK_APP_SECRET
self.group_id = plexpy.CONFIG.FACEBOOK_GROUP
def notify(self, subject, message):
if not subject or not message:
return
else:
self._post_facebook(subject + ': ' + message)
def test_notify(self):
return self._post_facebook(u"PlexPy Notifiers :: This is a test notification from PlexPy at " + helpers.now())
def _get_authorization(self):
return facebook.auth_url(app_id=self.app_id,
canvas_url=self.redirect_uri + '/facebookStep2',
perms=['user_managed_groups','publish_actions'])
def _get_credentials(self, code):
logger.info(u"PlexPy Notifiers :: Requesting access token from Facebook")
try:
# Request user access token
api = facebook.GraphAPI(version='2.5')
response = api.get_access_token_from_code(code=code,
redirect_uri=self.redirect_uri + '/facebookStep2',
app_id=self.app_id,
app_secret=self.app_secret)
access_token = response['access_token']
# Request extended user access token
api = facebook.GraphAPI(access_token=access_token, version='2.5')
response = api.extend_access_token(app_id=self.app_id,
app_secret=self.app_secret)
access_token = response['access_token']
plexpy.CONFIG.FACEBOOK_TOKEN = access_token
plexpy.CONFIG.write()
except Exception as e:
logger.error(u"PlexPy Notifiers :: Error requesting Facebook access token: %s" % e)
return False
return True
def _post_facebook(self, message=None):
access_token = plexpy.CONFIG.FACEBOOK_TOKEN
group_id = plexpy.CONFIG.FACEBOOK_GROUP
if group_id:
api = facebook.GraphAPI(access_token=access_token, version='2.5')
try:
api.put_wall_post(profile_id=group_id, message=message)
logger.info(u"PlexPy Notifiers :: Facebook notification sent.")
except Exception as e:
logger.warn(u"PlexPy Notifiers :: Error sending Facebook post: %s" % e)
return False
return True
else:
logger.warn(u"PlexPy Notifiers :: Error sending Facebook post: No Facebook Group ID provided.")
return False
def return_config_options(self):
config_option = [{'label': 'Instructions',
'description': '<strong>Facebook notifications are currently experimental!</strong><br><br> \
Step 1: Visit <a href="https://developers.facebook.com/apps/" target="_blank"> \
Facebook Developers</a> to add a new app using <strong>basic setup</strong>.<br>\
Step 2: Go to <strong>Settings > Basic</strong> and fill in a \
<strong>Contact Email</strong>.<br>\
Step 3: Go to <strong>Settings > Advanced</strong> and fill in \
<strong>Valid OAuth redirect URIs</strong> with your PlexPy URL (i.e. http://localhost:8181).<br>\
Step 4: Go to <strong>App Review</strong> and toggle public to <strong>Yes</strong>.<br>\
Step 5: Fill in the <strong>PlexPy URL</strong> below with the exact same URL from Step 3.<br>\
Step 6: Fill in the <strong>App ID</strong> and <strong>App Secret</strong> below.<br>\
Step 7: Click the <strong>Request Authorization</strong> button below.<br> \
Step 8: Fill in the <strong>Group ID</strong> below.',
'input_type': 'help'
},
{'label': 'PlexPy URL',
'value': self.redirect_uri,
'name': 'facebook_redirect_uri',
'description': 'Your PlexPy URL. This will tell Facebook where to redirect you after authorization.',
'input_type': 'text'
},
{'label': 'Facebook App ID',
'value': self.app_id,
'name': 'facebook_app_id',
'description': 'Your Facebook app ID.',
'input_type': 'text'
},
{'label': 'Facebook App Secret',
'value': self.app_secret,
'name': 'facebook_app_secret',
'description': 'Your Facebook app secret.',
'input_type': 'text'
},
{'label': 'Request Authorization',
'value': 'Request Authorization',
'name': 'facebookStep1',
'description': 'Request Facebook authorization. (Ensure you allow the browser pop-up).',
'input_type': 'button'
},
{'label': 'Facebook Group ID',
'value': self.group_id,
'name': 'facebook_group',
'description': 'Your Facebook Group ID.',
'input_type': 'text'
}
]
return config_option
|
Hellowlol/plexpy
|
plexpy/notifiers.py
|
Python
|
gpl-3.0
| 89,796
|
[
"VisIt"
] |
83846daa3e8dac9ec401d3dc2aa61bcf3e75a844c0717b69d92eec2e44d50c12
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 University of Liège
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sph.helpers import *
if __name__=="__main__":
boxL = 2.
Lfloor = 0.7
Lwater = 0.5
sep = 0.05/2
kernel = Kernel('cubic', False) # 'cubic', 'quadratic' or 'quintic'
law = EqState('liquid') # 'gas' or 'liquid'
# parameters
model = Model()
model.kernel = kernel
model.law = law
model.h_0 = 0.06/2 # initial smoothing length [m]
model.c_0 = 35.0 # initial speed of sound [m/s]
model.rho_0 = 1000.0 # initial density [kg/m^3]
model.dom_dim = boxL # domain size (cube)
model.alpha = 0.5 # artificial viscosity factor 1
model.beta = 0.0 # artificial viscosity factor 2
model.maxTime = 3.0 # simulation time
model.saveInt = 0.01/2 # save interval
# mobile particles
cube = Cube( o=(((boxL-Lwater)/2),((boxL-Lwater)/2), ((boxL)/2)+0.5), L=(Lwater,Lwater,Lwater), rho=model.rho_0, s=sep)
model.addMobile(cube.generate())
# fixed particles
#obstacle
plane = Cube( o=(((boxL-Lfloor)/2),((boxL-Lfloor)/2), (boxL/2)), L=(Lfloor,Lfloor,sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
#floor
plane = Cube( o=(0,0,0), L=(boxL,boxL,sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
#x=0
plane = Cube( o=(0,0,2*sep), L=(sep,boxL,boxL-2*sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
#y=0
plane = Cube( o=(2*sep,0,2*sep), L=(boxL-4*sep,sep,boxL-2*sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
#x=L
plane = Cube( o=(boxL-sep,0,2*sep), L=(sep,boxL,boxL-2*sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
#y=L
plane = Cube( o=(2*sep,boxL-sep,2*sep), L=(boxL-4*sep,sep,boxL-2*sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
# run SPH model
print(model)
model.run()
# convert to VTK
import sph.gui as gui
gui.ToParaview(verb=False).convertall()
|
rboman/progs
|
classes/sph0/louis/tests/waterdrop3.py
|
Python
|
apache-2.0
| 2,672
|
[
"VTK"
] |
efd398450f025f35a89fcf6204f76abe7d8e309d012df4473402f8d780f82e00
|
import scipy as SP
import numpy as NP
import scipy.linalg as LA
import scipy.optimize as opt
import scipy.stats as ST
import scipy.special as SS
from fastlmm.util.mingrid import *
from fastlmm.util.util import *
import time
import pdb
from bin2kernel import Bin2Kernel
from bin2kernel import makeBin2KernelAsEstimator
from bin2kernel import Bin2KernelLaplaceLinearN
from bin2kernel import getFastestBin2Kernel
from bin2kernel import Bin2KernelEPLinearN
def rotate(A, rotationMatrix,transposeRot=True):
'''
    Apply a rotation matrix from an eigenvalue decomposition (assumes the rotation
    matrix has orthogonal columns) and take care of low-rank structure in A:
    result = rotationMatrix.T * A
'''
[Nr,k] = rotationMatrix.shape
[N,M] = A.shape
if Nr!=N:
raise Exception("rotation and A are not aligned A.shape = [%i,%i], rotation.shape = [%i,%i]" % (N,M,Nr,k))
if k>Nr:
raise Exception("rotation matrix has more columns than rows = [%i,%i], rotation.shape = [%i,%i]" % (N,M,Nr,k))
res = rotationMatrix.T.dot(A)
if k<Nr:
resLowRank = A - rotationMatrix.dot(res)
res = [res,resLowRank]
return res
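# Usage sketch (assumed shapes): given an eigendecomposition eig = LA.eigh(K0)
# with eigenvector matrix eig[1] of shape [N,k], rotating data A of shape [N,M] via
#   res = rotate(A, eig[1])
# yields eig[1].T.dot(A) when k == N, and the pair
#   [eig[1].T.dot(A), A - eig[1].dot(eig[1].T.dot(A))]
# (the rotated part plus the residual outside the eigenbasis span) when k < N.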
def rotSymm(A, eig, delta=1.0, gamma=1.0, exponent=-0.5, forceSymm=True, symmetric=False, minVal=None):
    '''
    Rotate A with the eigenbasis of K_0 and scale by (gamma*S + delta)**exponent,
    i.e. apply a matrix power (such as the inverse square root) of gamma*K_0 + delta*I.
    '''
[N,M]=A.shape
[Nr,k] = eig[1].shape
if Nr!=N:
raise Exception("rotation and A are not aligned A.shape = [%i,%i], rotation.shape = [%i,%i]" % (N,M,Nr,k))
if symmetric and N!=M:
raise Exception("A is not symmetric. A.shape = [%i,%i], rotation.shape = [%i,%i]" % (N,M,Nr,k))
if k>Nr:
raise Exception("rotation matrix has more columns than rows = [%i,%i], rotation.shape = [%i,%i]" % (N,M,Nr,k))
res = eig[1].T.dot(A)
deltaPow = myPow(delta,exponent,minVal)
diag = eig[0]*gamma + delta
diag = myPow(diag,exponent,minVal=0.0)
if Nr>k:
diag-=deltaPow
res = dotDiag(res,diag)
        # low-rank identity: (gamma*K0 + delta*I)**e * A = delta**e * A + U*((gamma*S+delta)**e - delta**e)*U.T*A
        res = deltaPow*A + eig[1].dot(res)
else:
res = dotDiag(res,diag)
if forceSymm:
res = eig[1].dot(res)
return res
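# In the full-rank case (k == N) with forceSymm=True, rotSymm computes
#   eig[1] * diag((gamma*eig[0] + delta)**exponent) * eig[1].T * A,
# i.e. a matrix power of (gamma*K_0 + delta*I) applied to A; with the default
# exponent=-0.5 this is the inverse square root used to whiten the foreground
# kernel against the background model in lmm2k.getEig1 below.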
def dotDiag(A, diag, minVal = None, symmetric = False, exponent = 1.0):
'''
Multiply by a diagonal matrix build from
result = diag.dot(A)
'''
[N,M] = A.shape
[Nd] = diag.shape
if symmetric and N!=M:
raise Exception("A is not symmetric. A.shape = [%i,%i], rotation.shape = [%i,%i]" % (N,M,Nr,k))
if Nd!=N:
raise Exception("Matrices misaligned A.shape = [%i,%i], diag.shape = [%i,%i]" % (N,M,Nr,k))
diagcorr = myPow(diag,exponent,minVal)
result = A * NP.lib.stride_tricks.as_strided(diagcorr, (diagcorr.size,A.shape[1]), (diagcorr.itemsize,0))
if symmetric:
        result = dotDiag(A=result.T, diag=diagcorr, minVal=minVal, symmetric=False, exponent=1.0).T
return result
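# Equivalent dense formulation (for intuition only; the strided multiply above
# avoids materialising the diagonal matrix):
#   dotDiag(A, diag, exponent=e) == NP.diag(myPow(diag, e)).dot(A)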
def myPow(A,exponent,minVal=None):
'''
compute a power, with efficient computation of common powers
'''
if exponent == 1.0:
Aexp = A
elif exponent == -1.0:
Aexp = 1.0/A
elif exponent == 2.0:
Aexp = A*A
elif exponent == 0.5:
Aexp = NP.sqrt(A)
elif exponent == -0.5:
Aexp = 1.0/NP.sqrt(A)
elif exponent == -2.0:
Aexp = 1.0/(A*A)
else:
Aexp = NP.power(A,exponent)
if minVal is not None:
i_Null = A<=minVal
Aexp[i_Null]=0.0
return Aexp
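# Examples: myPow(A, -0.5) is 1.0/NP.sqrt(A) via the fast path, while
# myPow(A, -0.5, minVal=1E-10) additionally zeroes the entries where A <= 1E-10,
# so that (near-)null eigenvalues do not blow up under negative exponents.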
class lmm2k(object):
'''
linear mixed model with up to two kernels
N(y | X*beta ; sigma2(gamma0*K0 + gamma1*K1 + I ) ),
where
K0 = G0*G0^T
K1 = G1*G1^T
'''
__slots__ = ["G0","G1","Y","X","K0","K1","K","U","S","UX","Uy","UUX","UW","UUW","UUy","pos0","pos1","gamma0","gamma1","delta","exclude_idx","forcefullrank","numcalls","Xstar","Kstar","Kstar_star","UKstar","UUKstar","Gstar"]
def __init__(self,forcefullrank=False):
'''
Input:
forcefullrank : if True, then the code always computes K and runs cubically
(False)
'''
        self.forcefullrank = forcefullrank
        self.exclude_idx = []
        self.X = None
self.Y=None
self.G0=None
self.G1=None
self.K0=None
self.K1=None
self.gamma0=1.0
self.gamma1=1.0
self.delta=1.0
self.eig0=None
self.eig1=None
self.Yrot = None
self.Xrot = None
self.K1rot = None
self.G1rot = None
def setX(self, X):
'''
set the fixed effects X (covariates).
        The kernel has to be set in advance by first calling setG0() or setK0().
--------------------------------------------------------------------------
Input:
X : [N*D] 2-dimensional array of covariates
--------------------------------------------------------------------------
'''
self.X = X
self.Xrot = None
def setY(self, Y):
'''
set the phenotype y.
        The kernel has to be set in advance by first calling setG0() or setK0().
--------------------------------------------------------------------------
Input:
Y : [NxP] P-dimensional array of phenotype values
--------------------------------------------------------------------------
'''
self.Y = Y
self.Yrot = None
def setG0(self, G0):
'''
set the Kernel K0 from G0.
This has to be done before setting the data setX() and setY().
----------------------------------------------------------------------------
Input:
G0 : [N*k0] array of random effects
-----------------------------------------------------------------------------
'''
k = G0.shape[1]
N = G0.shape[0]
self.eig0 = None
self.eig1 = None
self.K1rot = None
self.Yrot = None
self.Xrot = None
if ((not self.forcefullrank) and (k<N)):
self.G0 = G0
else:
K0=G0.dot(G0.T);
self.setK0(K0=K0)
def setK0(self, K0):
'''
set the background Kernel K0.
--------------------------------------------------------------------------
Input:
K0 : [N*N] array, random effects covariance (positive semi-definite)
--------------------------------------------------------------------------
'''
self.K0 = K0
self.G1 = None
self.eig0 = None
self.eig1 = None
self.K1rot = None
self.Yrot = None
self.Xrot = None
self.G1rot = None
self.K1rot = None
def setG1(self, G1):
'''
set the Kernel K1 from G1.
----------------------------------------------------------------------------
Input:
        G1 : [N*k1] array of random effects
-----------------------------------------------------------------------------
'''
        k = G1.shape[1]
        N = G1.shape[0]
        self.eig1 = None
        self.G1rot = None
        if ((not self.forcefullrank) and (k < N)):
            # it is faster to use the eigen decomposition of G.T*G, but this is more accurate
            self.G1 = G1
        else:
            K1 = G1.dot(G1.T)
            self.setK1(K1=K1)
def setK1(self, K1):
'''
set the foreground Kernel K1.
This has to be done before setting the data setX() and setY().
--------------------------------------------------------------------------
Input:
K1 : [N*N] array, random effects covariance (positive semi-definite)
--------------------------------------------------------------------------
'''
self.eig1 = None
self.G1 = None
self.K1 = K1
self.K1rot= None
self.G1rot= None
def setVariances(self,gamma0=None,gamma1=None,delta=None):
if gamma0 is not None:
#background model changed, foreground rotation (U1,S1) changes:
self.gamma0 = gamma0
self.eig1 = None
self.Xrot = None
self.Yrot = None
self.K1rot = None
self.G1rot = None
if delta is not None:
#background model changed, foreground rotation (U1,S1) changes:
self.delta = delta
self.eig1 = None
self.Xrot = None
self.Yrot = None
self.K1rot = None
self.G1rot = None
if gamma1 is not None:
#foreground model changed
self.gamma1 = gamma1
def getEig1(self):
        '''
        Return the eigendecomposition [S1, U1] of the foreground kernel K1 after
        whitening with the background model (gamma0*K0 + delta*I); lazily computed and cached.
        '''
if self.eig1 is None:
#compute eig1
if self.K1 is not None:
if self.K1rot is None:
self.K1rot = rotSymm(self.K1, eig = self.eig0, exponent = -0.5, gamma=self.gamma0,delta = self.delta,forceSymm = False)
self.K1rot = rotSymm(self.K1rot.T, eig = self.eig0, exponent = -0.5, gamma=self.gamma0,delta = self.delta,forceSymm = False)
self.eig1 = LA.eigh(self.K1rot)
elif self.G1 is not None:
[N,k] = self.G1.shape
if self.G1rot is None:
self.G1rot = rotSymm(self.G1, eig = self.eig0, exponent = -0.5, gamma=self.gamma0,delta = self.delta,forceSymm = False)
try:
[U,S,V] = LA.svd(self.G1rot,full_matrices = False)
self.eig1 = [S*S,U]
except LA.LinAlgError: # revert to Eigenvalue decomposition
print "Got SVD exception, trying eigenvalue decomposition of square of G. Note that this is a little bit less accurate"
[S_,V_] = LA.eigh(self.G1rot.T.dot(self.G1rot))
S_nonz=(S_>0.0)
S1 = S_[S_nonz]
U1=self.G1rot.dot(V_[:,S_nonz]/SP.sqrt(S1))
self.eig1=[S1,U1]
return self.eig1
def getEig0(self):
        '''
        Return the eigendecomposition [S0, U0] of the background kernel K0;
        lazily computed and cached.
        '''
if self.eig0 is None:
#compute eig0
if self.K0 is not None:
                self.eig0 = LA.eigh(self.K0)
            elif self.G0 is not None:
[N,k] = self.G0.shape
try:
[U,S,V] = LA.svd(self.G0,full_matrices = False)
self.eig0 = [S*S,U]
except LA.LinAlgError: # revert to Eigenvalue decomposition
print "Got SVD exception, trying eigenvalue decomposition of square of G. Note that this is a little bit less accurate"
[S_,V_] = LA.eigh(self.G0.T.dot(self.G0))
S_nonz=(S_>0.0)
S0 = S_[S_nonz]
U0=self.G0.dot(V_[:,S_nonz]/SP.sqrt(S0))
self.eig0=[S0,U0]
        return self.eig0
def set_exclude_idx(self, idx):
'''
Set the indices of SNPs to be removed
--------------------------------------------------------------------------
Input:
idx : [k_up: number of SNPs to be removed] holds the indices of SNPs to be removed
--------------------------------------------------------------------------
'''
self.exclude_idx = idx
def findGamma1givenGamma0(self, gamma0 = 0.0, nGridGamma1=10, minGamma1=0.0, maxGamma1=10000.0, **kwargs):
'''
        Find the optimal gamma1 for a given gamma0. The background weight gamma0 is
        held fixed while gamma1 is optimized over a 1D grid.
        --------------------------------------------------------------------------
        Input:
        nGridGamma1 : number of gamma1 grid points to evaluate the negative log-likelihood at
        minGamma1   : minimum value for the gamma1 optimization
        maxGamma1   : maximum value for the gamma1 optimization
        --------------------------------------------------------------------------
        Output:
        dictionary containing the model parameters at the optimal gamma1
--------------------------------------------------------------------------
'''
self.setVariances(gamma0 = gamma0)
resmin=[None]
def f(x,resmin=resmin,**kwargs):
#self.setVariances(gamma1=x)
res = self.nLLeval(gamma1=x,**kwargs)
if (resmin[0] is None) or (res['nLL']<resmin[0]['nLL']):
resmin[0]=res
                self.setVariances(gamma1=x)
return res['nLL']
        opt = minimize1D(f=f, nGrid=nGridGamma1, minval=minGamma1, maxval=maxGamma1)
return resmin[0]
def findGammas(self, nGridGamma0=10, minGamma0=0.0, maxGamma0=10000.0, nGridGamma1=10, minGamma1=0.0, maxGamma1=10000.0,verbose=False, **kwargs):
'''
Find the optimal gamma0 and gamma1, such that K=gamma0*K0+gamma1*K1. Performs a double loop optimization (could be expensive for large grid-sizes)
--------------------------------------------------------------------------
Input:
        nGridGamma0 : number of gamma0 grid points to evaluate the negative log-likelihood at
        minGamma0   : minimum value for the gamma0 optimization
        maxGamma0   : maximum value for the gamma0 optimization
        nGridGamma1 : number of gamma1 grid points to evaluate the negative log-likelihood at
        minGamma1   : minimum value for the gamma1 optimization
        maxGamma1   : maximum value for the gamma1 optimization
--------------------------------------------------------------------------
Output:
dictionary containing the model parameters at the optimal gamma0 and gamma1
--------------------------------------------------------------------------
'''
self.numcalls=0
resmin=[None]
def f(x,resmin=resmin, nGridGamma1=nGridGamma1, minGamma1=minGamma1, maxGamma1=maxGamma1,**kwargs):
self.numcalls+=1
t0=time.time()
res = self.findGamma1givenGamma0(gamma0=x, nGridGamma1=nGridGamma1, minGamma1=minGamma1, maxGamma1=maxGamma1,**kwargs)
if (resmin[0] is None) or (res['nLL']<resmin[0]['nLL']):
resmin[0]=res
t1=time.time()
#print "one objective function call took %.2f seconds elapsed" % (t1-t0)
#import pdb; pdb.set_trace()
return res['nLL']
if verbose: print "findGammas"
        opt = minimize1D(f=f, nGrid=nGridGamma0, minval=minGamma0, maxval=maxGamma0, verbose=False)
#print "numcalls to innerLoopTwoKernel= " + str(self.numcalls)
return resmin[0]
def nLLeval(self,REML=True, gamma1 = None, dof = None, scale = 1.0):
'''
        evaluate -ln( N( U1^T*y | U1^T*X*beta , scale*(gamma1*S1 + I) ) ),
        in the space rotated by the background model, where the whitened
        foreground kernel decomposes as K1rot = U1*S1*U1^T
        --------------------------------------------------------------------------
        Input:
        REML   : boolean
                 if True  : compute REML
                 if False : compute ML
        gamma1 : weight of the foreground kernel K1 (default None uses self.gamma1)
        dof    : degrees of freedom of the multivariate Student-t
                 (default None uses the multivariate normal likelihood)
        scale  : scale parameter that multiplies the covariance matrix (default 1.0)
        --------------------------------------------------------------------------
        Output dictionary:
        'nLL'    : negative log-likelihood
        'sigma2' : the model variance sigma^2
        'beta'   : [D*1] array of fixed effects weights beta
        'gamma1' : weight of the foreground kernel K1
        'gamma0' : weight of the background kernel K0
        'delta'  : weight of the noise term
        'REML'   : True: REML was computed, False: ML was computed
        'dof'    : degrees of freedom of the multivariate Student-t
                   (default None uses the multivariate normal likelihood)
        'scale'  : scale parameter that multiplies the covariance matrix (default 1.0)
--------------------------------------------------------------------------
'''
if gamma1 is None:
gamma1=self.gamma1
if (gamma1<0.0):
return {'nLL':3E20,
'gamma1':gamma1,
'delta' : self.delta,
'gamma0': self.gamma0,
'dof' : dof,
'REML':REML,
'scale':scale}
        # note: the rotated quantities self.UX, self.Uy (and self.UUX, self.UUy in
        # the low-rank case) are assumed to have been precomputed with the U1 basis
        S1 = self.getEig1()[0]
        k = S1.shape[0]
        N = self.Y.shape[0]
        D = self.X.shape[1]
        Sd = (gamma1 * S1 + 1.0) * scale
        UXS = dotDiag(self.UX, Sd, minVal=0.0, symmetric=False, exponent=-1.0)
        UyS = dotDiag(self.Uy, Sd, minVal=0.0, symmetric=False, exponent=-1.0)
        XKX = UXS.T.dot(self.UX)
        XKy = UXS.T.dot(self.Uy)
        yKy = UyS.T.dot(self.Uy)
logdetK = SP.log(Sd).sum()
if (k<N):#low rank part
# determine normalization factor
denom = (1.0*scale)
XKX += self.UUX.T.dot(self.UUX)/(denom)
XKy += self.UUX.T.dot(self.UUy)/(denom)
yKy += self.UUy.T.dot(self.UUy)/(denom)
logdetK+=(N-k) * SP.log(denom)
# proximal contamination (see Supplement Note 2: An Efficient Algorithm for Avoiding Proximal Contamination)
# available at: http://www.nature.com/nmeth/journal/v9/n6/extref/nmeth.2037-S1.pdf
# exclude SNPs from the RRM in the likelihood evaluation
if len(self.exclude_idx) > 0:
raise Exception("not implemented")
num_exclude = len(self.exclude_idx)
# consider only excluded SNPs
G_exclude = self.G[:,self.exclude_idx]
self.UW = self.U.T.dot(G_exclude) # needed for proximal contamination
UWS = self.UW / NP.lib.stride_tricks.as_strided(Sd, (Sd.size,num_exclude), (Sd.itemsize,0))
assert UWS.shape == (k, num_exclude)
WW = NP.eye(num_exclude) - UWS.T.dot(self.UW)
WX = UWS.T.dot(self.UX)
Wy = UWS.T.dot(self.Uy)
assert WW.shape == (num_exclude, num_exclude)
assert WX.shape == (num_exclude, D)
assert Wy.shape == (num_exclude,)
if (k<N):#low rank part
self.UUW = G_exclude - self.U.dot(self.UW)
WW += self.UUW.T.dot(self.UUW)/denom
WX += self.UUW.T.dot(self.UUX)/denom
Wy += self.UUW.T.dot(self.UUy)/denom
#TODO: do cholesky, if fails do eigh
# compute inverse efficiently
[S_WW,U_WW] = LA.eigh(WW)
UWX = U_WW.T.dot(WX)
UWy = U_WW.T.dot(Wy)
assert UWX.shape == (num_exclude, D)
assert UWy.shape == (num_exclude,)
# compute S_WW^{-1} * UWX
WX = UWX / NP.lib.stride_tricks.as_strided(S_WW, (S_WW.size,UWX.shape[1]), (S_WW.itemsize,0))
# compute S_WW^{-1} * UWy
Wy = UWy / S_WW
# determinant update
logdetK += SP.log(S_WW).sum()
assert WX.shape == (num_exclude, D)
assert Wy.shape == (num_exclude,)
# perform updates (instantiations for a and b in Equation (1.5) of Supplement)
yKy += UWy.T.dot(Wy)
XKy += UWX.T.dot(Wy)
XKX += UWX.T.dot(WX)
#######
[SxKx,UxKx]= LA.eigh(XKX)
i_pos = SxKx>1E-10
beta = SP.dot(UxKx[:,i_pos],(SP.dot(UxKx[:,i_pos].T,XKy)/SxKx[i_pos]))
r2 = yKy-XKy.dot(beta)
if dof is None:#Use the Multivariate Gaussian
if REML:
XX = self.X.T.dot(self.X)
[Sxx,Uxx]= LA.eigh(XX)
logdetXX = SP.log(Sxx).sum()
logdetXKX = SP.log(SxKx).sum()
sigma2 = r2 / (N - D)
nLL = 0.5 * ( logdetK + logdetXKX - logdetXX + (N-D) * ( SP.log(2.0*SP.pi*sigma2) + 1 ) )
else:
sigma2 = r2 / (N)
nLL = 0.5 * ( logdetK + N * ( SP.log(2.0*SP.pi*sigma2) + 1 ) )
result = {
'nLL':nLL,
'sigma2':sigma2,
'beta':beta,
'gamma1':gamma1,
'REML':REML,
'gamma0':self.gamma0,
'delta':self.delta,
'scale':scale
}
        else:#Use multivariate student-t
            if REML:
                XX = self.X.T.dot(self.X)
                [Sxx,Uxx]= LA.eigh(XX)
                logdetXX  = SP.log(Sxx).sum()
                logdetXKX = SP.log(SxKx).sum()
                sigma2 = r2 / (N - D)  # variance estimate mirroring the Gaussian REML branch (needed by the result dict)
                nLL =  0.5 * ( logdetK + logdetXKX - logdetXX + (dof + (N-D)) * SP.log(1.0+r2/dof) )
                nLL += 0.5 * (N-D)*SP.log( dof*SP.pi ) + SS.gammaln( 0.5*dof ) - SS.gammaln( 0.5* (dof + (N-D) ))
            else:
                sigma2 = r2 / (N)      # variance estimate mirroring the Gaussian ML branch (needed by the result dict)
                nLL =   0.5 * ( logdetK + (dof + N) * SP.log(1.0+r2/dof) )
                nLL += 0.5 * N*SP.log( dof*SP.pi ) + SS.gammaln( 0.5*dof ) - SS.gammaln( 0.5* (dof + N ))
result = {
'nLL':nLL,
'dof':dof,
'sigma2':sigma2,
'beta':beta,
'gamma1':gamma1,
'REML':REML,
'gamma0':self.gamma0,
'delta':self.delta,
'scale':scale
}
return result
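# --- Hedged sanity-check sketch (illustrative; not part of lmm2k itself) ---
# The Gaussian ML branch above profiles sigma^2 out analytically:
#   sigma2 = r2 / N,  nLL = 0.5 * (logdetK + N * (log(2*pi*sigma2) + 1))
# The self-contained numpy snippet below re-derives that value for a toy
# diagonal covariance K = diag(Sd) and checks it against a direct density
# evaluation; y, X and Sd are local toy names, not attributes of this class.
import numpy as np

def _toy_gaussian_nll(y, X, Sd):
    # GLS in the rotated space: weight rows by 1/Sd (K is already diagonal here).
    XKX = (X / Sd[:, None]).T.dot(X)
    XKy = (X / Sd[:, None]).T.dot(y)
    yKy = (y / Sd).dot(y)
    beta = np.linalg.solve(XKX, XKy)
    r2 = yKy - XKy.dot(beta)
    N = y.shape[0]
    sigma2 = r2 / N
    nll = 0.5 * (np.log(Sd).sum() + N * (np.log(2.0 * np.pi * sigma2) + 1.0))
    return nll, beta, sigma2

def _check_toy_gaussian_nll():
    rng = np.random.RandomState(0)
    N, D = 50, 2
    X = rng.randn(N, D)
    Sd = rng.rand(N) + 0.5
    y = X.dot(np.array([1.0, -2.0])) + np.sqrt(Sd) * rng.randn(N)
    nll, beta, sigma2 = _toy_gaussian_nll(y, X, Sd)
    # Direct evaluation of -log N(y | X*beta, sigma2*diag(Sd)) at the optimum.
    resid = y - X.dot(beta)
    direct = 0.5 * (N * np.log(2.0 * np.pi * sigma2) + np.log(Sd).sum()
                    + resid.dot(resid / Sd) / sigma2)
    assert abs(nll - direct) < 1e-8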
|
zhonghualiu/FaST-LMM
|
fastlmm/inference/lmm2k.py
|
Python
|
apache-2.0
| 22,357
|
[
"Gaussian"
] |
d343e8425d119b02d6ad76ed83c04ead117c472f2c0183806537d747c6acaa2e
|
from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import decimal
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
dynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url="http://localhost:8000")
table = dynamodb.Table('Movies')
title = "The Big New Movie"
year = 2015
response = table.update_item(
Key={
'year': year,
'title': title
},
UpdateExpression="set info.rating = :r, info.plot=:p, info.actors=:a",
ExpressionAttributeValues={
':r': decimal.Decimal(5.5),
':p': "Everything happens all at once.",
':a': ["Larry", "Moe", "Curly"]
},
ReturnValues="UPDATED_NEW"
)
print("UpdateItem succeeded:")
print(json.dumps(response, indent=4, cls=DecimalEncoder))
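# --- Hedged side note (illustrative; not part of the original sample) ---
# DynamoDB numbers are decimal-based, and boto3 validates them with
# boto3.dynamodb.types.DYNAMODB_CONTEXT, whose Inexact/Rounded traps reject
# values that cannot be represented exactly. decimal.Decimal(5.5) above
# happens to be exact (5.5 is binary-representable), but constructing
# Decimal from a string is the safer habit in general:
print(decimal.Decimal(0.1))    # 0.1000000000000000055511151231257827021181583404541015625
print(decimal.Decimal('0.1'))  # 0.1  -- the value you actually want to store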
|
data-north/datanorth-api
|
examples/MoviesItemOps03.py
|
Python
|
mit
| 1,058
|
[
"MOE"
] |
5501381ba0ef1ccbb25d814331adbb4ef27f5db95317c07c73bf48ee9e4fa1a2
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Martin Hawlisch, Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2011 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"Import from vCard (RFC 2426)"
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import sys
import re
import time
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".ImportVCard")
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.errors import GrampsImportError
from gramps.gen.lib import Address, Date, Event, EventRef, EventType, Name, NameType, Person, Surname, Url, UrlType
from gramps.gen.db import DbTxn
from gramps.gen.plug.utils import OpenFileOrStdin
#-------------------------------------------------------------------------
#
# Support Functions
#
#-------------------------------------------------------------------------
def importData(database, filename, user):
"""Function called by Gramps to import data on persons in VCard format."""
parser = VCardParser(database)
try:
with OpenFileOrStdin(filename) as filehandle:
parser.parse(filehandle)
except EnvironmentError as msg:
user.notify_error(_("%s could not be opened\n") % filename, str(msg))
return
except GrampsImportError as msg:
user.notify_error(_("%s could not be opened\n") % filename, str(msg))
return
return None # This module doesn't provide info about what got imported.
def splitof_nameprefix(name):
"""
Return a (prefix, Surname) tuple by splitting on first uppercase char.
Shame on Python for not supporting [[:upper:]] in re!
"""
look_for_capital = False
for i, char in enumerate(name):
if look_for_capital:
if char.isupper():
return (name[:i].rstrip(), name[i:])
else:
look_for_capital = False
if not char.isalpha():
look_for_capital = True
return ('', name)
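# Hedged illustration (not in the original module): the split happens at the
# first uppercase letter that follows a non-alphabetic character, so lowercase
# particles stay in the prefix, e.g.
#   splitof_nameprefix("van Gogh")  ->  ('van', 'Gogh')
#   splitof_nameprefix("Gogh")      ->  ('', 'Gogh')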
def fitin(prototype, receiver, element):
"""
Return the index in string receiver at which element should be inserted
to match part of prototype.
Assume that the part of receiver that is not tested does match.
Don't split to work with lists because element may contain a space!
Example: fitin("Mr. Gaius Julius Caesar", "Gaius Caesar", "Julius") = 6
:param prototype: Partly to be matched by inserting element in receiver.
:type prototype: str
:param receiver: Space separated words that miss words to match prototype.
:type receiver: str
:param element: Words that need to be inserted; error if not in prototype.
:type element: str
:returns: Returns index where element fits in receiver, -1 if receiver
not in prototype, or throws IndexError if element at end receiver.
:rtype: int
"""
receiver_idx = 0
receiver_chunks = receiver.split()
element_idx = prototype.index(element)
i = 0
idx = prototype.find(receiver_chunks[i])
while idx < element_idx:
if idx == -1:
return -1
receiver_idx += len(receiver_chunks[i]) + 1
i += 1
idx = prototype.find(receiver_chunks[i])
return receiver_idx
#-------------------------------------------------------------------------
#
# VCardParser class
#
#-------------------------------------------------------------------------
class VCardParser(object):
"""Class to read data in VCard format from a file."""
DATE_RE = re.compile(r'^(\d{4}-\d{1,2}-\d{1,2})|(?:(\d{4})-?(\d\d)-?(\d\d))')
GROUP_RE = re.compile(r'^(?:[-0-9A-Za-z]+\.)?(.+)$') # see RFC 2425 sec5.8.2
ESCAPE_CHAR = '\\'
TOBE_ESCAPED = ['\\', ',', ';'] # order is important
LINE_CONTINUATION = [' ', '\t']
@staticmethod
def name_value_split(data):
"""Property group.name:value split is on first unquoted colon."""
colon_idx = data.find(':')
if colon_idx < 1:
return ()
quote_count = data.count('"', 0, colon_idx)
while quote_count % 2 == 1:
colon_idx = data.find(':', colon_idx + 1)
quote_count = data.count('"', 0, colon_idx)
group_name, value = data[:colon_idx], data[colon_idx+1:]
name_parts = VCardParser.GROUP_RE.match(group_name)
return (name_parts.group(1), value)
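    # Hedged illustration (not in the original method): colons inside quoted
    # parameter values are skipped and an optional RFC 2425 group prefix is
    # stripped by GROUP_RE, e.g.
    #   VCardParser.name_value_split('item1.TEL;TYPE="work:voice":+31 42 11 22 33')
    #     ->  ('TEL;TYPE="work:voice"', '+31 42 11 22 33')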
@staticmethod
def unesc(data):
"""Remove VCard escape sequences."""
if type(data) == type('string'):
for char in reversed(VCardParser.TOBE_ESCAPED):
data = data.replace(VCardParser.ESCAPE_CHAR + char, char)
return data
elif type(data) == type([]):
return list(map(VCardParser.unesc, data))
else:
raise TypeError("VCard unescaping is not implemented for "
"data type %s." % str(type(data)))
@staticmethod
def count_escapes(strng):
"""Count the number of escape characters at the end of a string."""
count = 0
for char in reversed(strng):
if char != VCardParser.ESCAPE_CHAR:
return count
count += 1
return count
@staticmethod
def split_unescaped(strng, sep):
"""Split on sep if sep is unescaped."""
strng_parts = strng.split(sep)
for i in reversed(range(len(strng_parts[:]))):
if VCardParser.count_escapes(strng_parts[i]) % 2 == 1:
# the sep was escaped so undo split
appendix = strng_parts.pop(i+1)
strng_parts[i] += sep + appendix
return strng_parts
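    # Hedged illustration (not in the original method): a separator preceded by
    # an odd number of escape characters is glued back together, e.g.
    #   VCardParser.split_unescaped('Smith\\;Jones;Brown', ';')
    #     ->  ['Smith\\;Jones', 'Brown']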
def __init__(self, dbase):
self.database = dbase
self.formatted_name = ''
self.name_parts = ''
self.next_line = None
self.trans = None
self.version = None
self.person = None
def __get_next_line(self, filehandle):
"""
Read and return the line with the next property of the VCard.
Also if it spans multiple lines (RFC 2425 sec.5.8.1).
"""
line = self.next_line
self.next_line = filehandle.readline()
while self.next_line and self.next_line[0] in self.LINE_CONTINUATION:
line = line.rstrip("\n")
            #TODO perhaps next lines superfluous because of the rU open parameter?
if len(line) > 0 and line[-1] == "\r":
line = line[:-1]
line += self.next_line[1:]
self.next_line = filehandle.readline()
if line:
line = line.strip()
else:
line = None
return line
def parse(self, filehandle):
"""
Prepare the database and parse the input file.
:param filehandle: open file handle positioned at start of the file
"""
tym = time.time()
self.person = None
self.database.disable_signals()
with DbTxn(_("vCard import"), self.database, batch=True) as self.trans:
self._parse_vCard_file(filehandle)
self.database.enable_signals()
self.database.request_rebuild()
tym = time.time() - tym
msg = glocale.get_translation().ngettext('Import Complete: %d second',
'Import Complete: %d seconds', tym ) % tym
LOG.debug(msg)
def _parse_vCard_file(self, filehandle):
"""Read each line of the input file and act accordingly."""
self.next_line = filehandle.readline()
while True:
line = self.__get_next_line(filehandle)
if line is None:
break
if line == "":
continue
if line.find(":") == -1:
continue
line_parts = self.name_value_split(line)
if not line_parts:
continue
# No check for escaped ; because only fields[0] is used.
fields = line_parts[0].split(";")
property_name = fields[0].upper()
if property_name == "BEGIN":
self.next_person()
elif property_name == "END":
self.finish_person()
elif property_name == "VERSION":
self.check_version(fields, line_parts[1])
elif property_name == "FN":
self.add_formatted_name(fields, line_parts[1])
elif property_name == "N":
self.add_name_parts(fields, line_parts[1])
elif property_name == "NICKNAME":
self.add_nicknames(fields, line_parts[1])
elif property_name == "SORT-STRING":
self.add_sortas(fields, line_parts[1])
elif property_name == "ADR":
self.add_address(fields, line_parts[1])
elif property_name == "TEL":
self.add_phone(fields, line_parts[1])
elif property_name == "BDAY":
self.add_birthday(fields, line_parts[1])
elif property_name == "ROLE":
self.add_occupation(fields, line_parts[1])
elif property_name == "URL":
self.add_url(fields, line_parts[1])
elif property_name == "EMAIL":
self.add_email(fields, line_parts[1])
elif property_name == "PRODID":
# Included cause VCards made by Gramps have this prop.
pass
else:
LOG.warn("Token >%s< unknown. line skipped: %s" %
(fields[0],line))
def finish_person(self):
"""All info has been collected, write to database."""
if self.person is not None:
if self.add_name():
self.database.add_person(self.person, self.trans)
self.person = None
def next_person(self):
"""A VCard for another person is started."""
if self.person is not None:
self.finish_person()
LOG.warn("BEGIN property not properly closed by END property, "
"Gramps can't cope with nested VCards.")
self.person = Person()
self.formatted_name = ''
self.name_parts = ''
def check_version(self, fields, data):
"""Check the version of the VCard, only version 3.0 is supported."""
self.version = data
if self.version != "3.0":
raise GrampsImportError(_("Import of VCards version %s is "
"not supported by Gramps.") % self.version)
def add_formatted_name(self, fields, data):
"""Read the FN property of a VCard."""
if not self.formatted_name:
self.formatted_name = self.unesc(str(data)).strip()
def add_name_parts(self, fields, data):
"""Read the N property of a VCard."""
if not self.name_parts:
self.name_parts = data.strip()
def add_name(self):
"""
Add the name to the person.
Returns True on success, False on failure.
"""
if not self.name_parts.strip():
LOG.warn("VCard is malformed missing the compulsory N property, "
"so there is no name; skip it.")
return False
        if not self.formatted_name:
            LOG.warn("VCard is malformed: missing the compulsory FN property; "
                     "deriving the name from N alone.")
data_fields = self.split_unescaped(self.name_parts, ';')
if len(data_fields) != 5:
LOG.warn("VCard is malformed wrong number of name components.")
name = Name()
name.set_type(NameType(NameType.BIRTH))
if data_fields[0].strip():
# assume first surname is primary
for surname_str in self.split_unescaped(data_fields[0], ','):
surname = Surname()
prefix, sname = splitof_nameprefix(self.unesc(surname_str))
surname.set_surname(sname.strip())
surname.set_prefix(prefix.strip())
name.add_surname(surname)
if len(data_fields) > 1 and data_fields[1].strip():
given_name = ' '.join(self.unesc(
self.split_unescaped(data_fields[1], ',')))
else:
given_name = ''
if len(data_fields) > 2 and data_fields[2].strip():
additional_names = ' '.join(self.unesc(
self.split_unescaped(data_fields[2], ',')))
else:
additional_names = ''
self.add_firstname(given_name.strip(), additional_names.strip(), name)
if len(data_fields) > 3 and data_fields[3].strip():
name.set_title(' '.join(self.unesc(
self.split_unescaped(data_fields[3], ','))))
if len(data_fields) > 4 and data_fields[4].strip():
name.set_suffix(' '.join(self.unesc(
self.split_unescaped(data_fields[4], ','))))
self.person.set_primary_name(name)
return True
def add_firstname(self, given_name, additional_names, name):
"""
Combine given_name and additional_names and add as firstname to name.
If possible try to add given_name as call name.
"""
default = "%s %s" % (given_name, additional_names)
if self.formatted_name:
if given_name:
if additional_names:
given_name_pos = self.formatted_name.find(given_name)
if given_name_pos != -1:
add_names_pos = self.formatted_name.find(additional_names)
if add_names_pos != -1:
if given_name_pos <= add_names_pos:
firstname = default
# Uncertain if given name is used as callname
else:
firstname = "%s %s" % (additional_names,
given_name)
name.set_call_name(given_name)
else:
idx = fitin(self.formatted_name, additional_names,
given_name)
if idx == -1:
# Additional names is not in formatted name
firstname = default
else: # Given name in middle of additional names
firstname = "%s%s %s" % (additional_names[:idx],
given_name, additional_names[idx:])
name.set_call_name(given_name)
else: # Given name is not in formatted name
firstname = default
else: # There are no additional_names
firstname = given_name
else: # There is no given_name
firstname = additional_names
else: # There is no formatted name
firstname = default
name.set_first_name(firstname.strip())
return
def add_nicknames(self, fields, data):
"""Read the NICKNAME property of a VCard."""
for nick in self.split_unescaped(data, ','):
nickname = nick.strip()
if nickname:
name = Name()
name.set_nick_name(self.unesc(nickname))
self.person.add_alternate_name(name)
def add_sortas(self, fields, data):
"""Read the SORT-STRING property of a VCard."""
#TODO
pass
def add_address(self, fields, data):
"""Read the ADR property of a VCard."""
data_fields = self.split_unescaped(data, ';')
data_fields = [x.strip() for x in self.unesc(data_fields)]
if ''.join(data_fields):
addr = Address()
def add_street(strng):
if strng:
already = addr.get_street()
if already:
addr.set_street("%s %s" % (already, strng))
else:
addr.set_street(strng)
addr.add_street = add_street
set_func = ['add_street', 'add_street', 'add_street', 'set_city',
'set_state', 'set_postal_code', 'set_country']
for i, data in enumerate(data_fields):
if i >= len(set_func):
break
getattr(addr, set_func[i])(data)
self.person.add_address(addr)
def add_phone(self, fields, data):
"""Read the TEL property of a VCard."""
tel = data.strip()
if tel:
addr = Address()
addr.set_phone(self.unesc(tel))
self.person.add_address(addr)
def add_birthday(self, fields, data):
"""Read the BDAY property of a VCard."""
date_str = data.strip()
date_match = VCardParser.DATE_RE.match(date_str)
if date_match:
if date_match.group(2):
date_str = "%s-%s-%s" % (date_match.group(2),
date_match.group(3), date_match.group(4))
else:
date_str = date_match.group(1)
event = Event()
event.set_type(EventType(EventType.BIRTH))
date = Date()
date.set_yr_mon_day(*[int(x, 10) for x in date_str.split('-')])
event.set_date_object(date)
self.database.add_event(event, self.trans)
event_ref = EventRef()
event_ref.set_reference_handle(event.get_handle())
self.person.set_birth_ref(event_ref)
else:
LOG.warn("Date %s not in appropriate format yyyy-mm-dd, "
"line skipped." % date_str)
def add_occupation(self, fields, data):
"""Read the ROLE property of a VCard."""
occupation = data.strip()
if occupation:
event = Event()
event.set_type(EventType(EventType.OCCUPATION))
event.set_description(self.unesc(occupation))
self.database.add_event(event, self.trans)
event_ref = EventRef()
event_ref.set_reference_handle(event.get_handle())
self.person.add_event_ref(event_ref)
def add_url(self, fields, data):
"""Read the URL property of a VCard."""
href = data.strip()
if href:
url = Url()
url.set_path(self.unesc(href))
self.person.add_url(url)
def add_email(self, fields, data):
"""Read the EMAIL property of a VCard."""
email = data.strip()
if email:
url = Url()
url.set_type(UrlType(UrlType.EMAIL))
url.set_path(self.unesc(email))
self.person.add_url(url)
|
Forage/Gramps
|
gramps/plugins/importer/importvcard.py
|
Python
|
gpl-2.0
| 19,982
|
[
"Brian"
] |
632a2f319a4a95ff4bd5cc24fb8d939a6759f04c520eb6322c04b585bf57c738
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_lazy as _
LINK_TYPES = ['original', 'data', 'image', 'metadata', 'html',
'OGC:WMS', 'OGC:WFS', 'OGC:WCS']
HIERARCHY_LEVELS = (
('series', _('series')),
('software', _('computer program or routine')),
('featureType', _('feature type')),
('model', _('copy or imitation of an existing or hypothetical object')),
('collectionHardware', _('collection hardware')),
('collectionSession', _('collection session')),
('nonGeographicDataset', _('non-geographic data')),
('propertyType', _('property type')),
('fieldSession', _('field session')),
('dataset', _('dataset')),
('service', _('service interfaces')),
('attribute', _('attribute class')),
('attributeType', _('characteristic of a feature')),
('tile', _('tile or spatial subset of geographic data')),
('feature', _('feature')),
('dimensionGroup', _('dimension group')),
)
UPDATE_FREQUENCIES = (
('unknown', _('frequency of maintenance for the data is not known')),
('continual', _('data is repeatedly and frequently updated')),
('notPlanned', _('there are no plans to update the data')),
('daily', _('data is updated each day')),
('annually', _('data is updated every year')),
('asNeeded', _('data is updated as deemed necessary')),
('monthly', _('data is updated each month')),
('fortnightly', _('data is updated every two weeks')),
('irregular',
_('data is updated in intervals that are uneven in duration')),
('weekly', _('data is updated on a weekly basis')),
('biannually', _('data is updated twice each year')),
('quarterly', _('data is updated every three months')),
)
CONTACT_FIELDS = [
'name',
'organization',
'position',
'voice',
'facsimile',
'delivery_point',
'city',
'administrative_area',
'postal_code',
'country',
'email',
'role'
]
DEFAULT_SUPPLEMENTAL_INFORMATION = _('No information provided')
COUNTRIES = (
('AFG', 'Afghanistan'),
('ALA', 'Aland Islands'),
('ALB', 'Albania'),
('DZA', 'Algeria'),
('ASM', 'American Samoa'),
('AND', 'Andorra'),
('AGO', 'Angola'),
('AIA', 'Anguilla'),
('ATG', 'Antigua and Barbuda'),
('ARG', 'Argentina'),
('ARM', 'Armenia'),
('ABW', 'Aruba'),
('AUS', 'Australia'),
('AUT', 'Austria'),
('AZE', 'Azerbaijan'),
('BHS', 'Bahamas'),
('BHR', 'Bahrain'),
('BGD', 'Bangladesh'),
('BRB', 'Barbados'),
('BLR', 'Belarus'),
('BEL', 'Belgium'),
('BLZ', 'Belize'),
('BEN', 'Benin'),
('BMU', 'Bermuda'),
('BTN', 'Bhutan'),
('BOL', 'Bolivia'),
('BIH', 'Bosnia and Herzegovina'),
('BWA', 'Botswana'),
('BRA', 'Brazil'),
('VGB', 'British Virgin Islands'),
('BRN', 'Brunei Darussalam'),
('BGR', 'Bulgaria'),
('BFA', 'Burkina Faso'),
('BDI', 'Burundi'),
('KHM', 'Cambodia'),
('CMR', 'Cameroon'),
('CAN', 'Canada'),
('CPV', 'Cape Verde'),
('CYM', 'Cayman Islands'),
('CAF', 'Central African Republic'),
('TCD', 'Chad'),
('CIL', 'Channel Islands'),
('CHL', 'Chile'),
('CHN', 'China'),
('HKG', 'China - Hong Kong'),
('MAC', 'China - Macao'),
('COL', 'Colombia'),
('COM', 'Comoros'),
('COG', 'Congo'),
('COK', 'Cook Islands'),
('CRI', 'Costa Rica'),
('CIV', 'Cote d\'Ivoire'),
('HRV', 'Croatia'),
('CUB', 'Cuba'),
('CYP', 'Cyprus'),
('CZE', 'Czech Republic'),
('PRK', 'Democratic People\'s Republic of Korea'),
('COD', 'Democratic Republic of the Congo'),
('DNK', 'Denmark'),
('DJI', 'Djibouti'),
('DMA', 'Dominica'),
('DOM', 'Dominican Republic'),
('ECU', 'Ecuador'),
('EGY', 'Egypt'),
('SLV', 'El Salvador'),
('GNQ', 'Equatorial Guinea'),
('ERI', 'Eritrea'),
('EST', 'Estonia'),
('ETH', 'Ethiopia'),
('FRO', 'Faeroe Islands'),
('FLK', 'Falkland Islands (Malvinas)'),
('FJI', 'Fiji'),
('FIN', 'Finland'),
('FRA', 'France'),
('GUF', 'French Guiana'),
('PYF', 'French Polynesia'),
('GAB', 'Gabon'),
('GMB', 'Gambia'),
('GEO', 'Georgia'),
('DEU', 'Germany'),
('GHA', 'Ghana'),
('GIB', 'Gibraltar'),
('GRC', 'Greece'),
('GRL', 'Greenland'),
('GRD', 'Grenada'),
('GLP', 'Guadeloupe'),
('GUM', 'Guam'),
('GTM', 'Guatemala'),
('GGY', 'Guernsey'),
('GIN', 'Guinea'),
('GNB', 'Guinea-Bissau'),
('GUY', 'Guyana'),
('HTI', 'Haiti'),
('VAT', 'Holy See (Vatican City)'),
('HND', 'Honduras'),
('HUN', 'Hungary'),
('ISL', 'Iceland'),
('IND', 'India'),
('IDN', 'Indonesia'),
('IRN', 'Iran'),
('IRQ', 'Iraq'),
('IRL', 'Ireland'),
('IMN', 'Isle of Man'),
('ISR', 'Israel'),
('ITA', 'Italy'),
('JAM', 'Jamaica'),
('JPN', 'Japan'),
('JEY', 'Jersey'),
('JOR', 'Jordan'),
('KAZ', 'Kazakhstan'),
('KEN', 'Kenya'),
('KIR', 'Kiribati'),
('KWT', 'Kuwait'),
('KGZ', 'Kyrgyzstan'),
('LAO', 'Lao People\'s Democratic Republic'),
('LVA', 'Latvia'),
('LBN', 'Lebanon'),
('LSO', 'Lesotho'),
('LBR', 'Liberia'),
('LBY', 'Libyan Arab Jamahiriya'),
('LIE', 'Liechtenstein'),
('LTU', 'Lithuania'),
('LUX', 'Luxembourg'),
('MKD', 'Macedonia'),
('MDG', 'Madagascar'),
('MWI', 'Malawi'),
('MYS', 'Malaysia'),
('MDV', 'Maldives'),
('MLI', 'Mali'),
('MLT', 'Malta'),
('MHL', 'Marshall Islands'),
('MTQ', 'Martinique'),
('MRT', 'Mauritania'),
('MUS', 'Mauritius'),
('MYT', 'Mayotte'),
('MEX', 'Mexico'),
('FSM', 'Micronesia, Federated States of'),
('MCO', 'Monaco'),
('MNG', 'Mongolia'),
('MNE', 'Montenegro'),
('MSR', 'Montserrat'),
('MAR', 'Morocco'),
('MOZ', 'Mozambique'),
('MMR', 'Myanmar'),
('NAM', 'Namibia'),
('NRU', 'Nauru'),
('NPL', 'Nepal'),
('NLD', 'Netherlands'),
('ANT', 'Netherlands Antilles'),
('NCL', 'New Caledonia'),
('NZL', 'New Zealand'),
('NIC', 'Nicaragua'),
('NER', 'Niger'),
('NGA', 'Nigeria'),
('NIU', 'Niue'),
('NFK', 'Norfolk Island'),
('MNP', 'Northern Mariana Islands'),
('NOR', 'Norway'),
('PSE', 'Occupied Palestinian Territory'),
('OMN', 'Oman'),
('PAK', 'Pakistan'),
('PLW', 'Palau'),
('PAN', 'Panama'),
('PNG', 'Papua New Guinea'),
('PRY', 'Paraguay'),
('PER', 'Peru'),
('PHL', 'Philippines'),
('PCN', 'Pitcairn'),
('POL', 'Poland'),
('PRT', 'Portugal'),
('PRI', 'Puerto Rico'),
('QAT', 'Qatar'),
('KOR', 'Republic of Korea'),
('MDA', 'Republic of Moldova'),
('REU', 'Reunion'),
('ROU', 'Romania'),
('RUS', 'Russian Federation'),
('RWA', 'Rwanda'),
('BLM', 'Saint-Barthelemy'),
('SHN', 'Saint Helena'),
('KNA', 'Saint Kitts and Nevis'),
('LCA', 'Saint Lucia'),
('MAF', 'Saint-Martin (French part)'),
('SPM', 'Saint Pierre and Miquelon'),
('VCT', 'Saint Vincent and the Grenadines'),
('WSM', 'Samoa'),
('SMR', 'San Marino'),
('STP', 'Sao Tome and Principe'),
('SAU', 'Saudi Arabia'),
('SEN', 'Senegal'),
('SRB', 'Serbia'),
('SYC', 'Seychelles'),
('SLE', 'Sierra Leone'),
('SGP', 'Singapore'),
('SVK', 'Slovakia'),
('SVN', 'Slovenia'),
('SLB', 'Solomon Islands'),
('SOM', 'Somalia'),
('ZAF', 'South Africa'),
('SSD', 'South Sudan'),
('ESP', 'Spain'),
('LKA', 'Sri Lanka'),
('SDN', 'Sudan'),
('SUR', 'Suriname'),
('SJM', 'Svalbard and Jan Mayen Islands'),
('SWZ', 'Swaziland'),
('SWE', 'Sweden'),
('CHE', 'Switzerland'),
('SYR', 'Syrian Arab Republic'),
('TJK', 'Tajikistan'),
('THA', 'Thailand'),
('TLS', 'Timor-Leste'),
('TGO', 'Togo'),
('TKL', 'Tokelau'),
('TON', 'Tonga'),
('TTO', 'Trinidad and Tobago'),
('TUN', 'Tunisia'),
('TUR', 'Turkey'),
('TKM', 'Turkmenistan'),
('TCA', 'Turks and Caicos Islands'),
('TUV', 'Tuvalu'),
('UGA', 'Uganda'),
('UKR', 'Ukraine'),
('ARE', 'United Arab Emirates'),
('GBR', 'United Kingdom'),
('TZA', 'United Republic of Tanzania'),
('USA', 'United States of America'),
('VIR', 'United States Virgin Islands'),
('URY', 'Uruguay'),
('UZB', 'Uzbekistan'),
('VUT', 'Vanuatu'),
('VEN', 'Venezuela (Bolivarian Republic of)'),
('VNM', 'Viet Nam'),
('WLF', 'Wallis and Futuna Islands'),
('ESH', 'Western Sahara'),
('YEM', 'Yemen'),
('ZMB', 'Zambia'),
('ZWE', 'Zimbabwe'),
)
# Taken from http://www.w3.org/WAI/ER/IG/ert/iso639.htm
ALL_LANGUAGES = (
('abk', 'Abkhazian'),
('aar', 'Afar'),
('afr', 'Afrikaans'),
('amh', 'Amharic'),
('ara', 'Arabic'),
('asm', 'Assamese'),
('aym', 'Aymara'),
('aze', 'Azerbaijani'),
('bak', 'Bashkir'),
('ben', 'Bengali'),
('bih', 'Bihari'),
('bis', 'Bislama'),
('bre', 'Breton'),
('bul', 'Bulgarian'),
('bel', 'Byelorussian'),
('cat', 'Catalan'),
('chi', 'Chinese'),
('cos', 'Corsican'),
('dan', 'Danish'),
('dzo', 'Dzongkha'),
('eng', 'English'),
('fra', 'French'),
('epo', 'Esperanto'),
('est', 'Estonian'),
('fao', 'Faroese'),
('fij', 'Fijian'),
('fin', 'Finnish'),
('fry', 'Frisian'),
('glg', 'Gallegan'),
('ger', 'German'),
('gre', 'Greek'),
('kal', 'Greenlandic'),
('grn', 'Guarani'),
('guj', 'Gujarati'),
('hau', 'Hausa'),
('heb', 'Hebrew'),
('hin', 'Hindi'),
('hun', 'Hungarian'),
('ind', 'Indonesian'),
('ina', 'Interlingua (International Auxiliary language Association)'),
('iku', 'Inuktitut'),
('ipk', 'Inupiak'),
('ita', 'Italian'),
('jpn', 'Japanese'),
('kan', 'Kannada'),
('kas', 'Kashmiri'),
('kaz', 'Kazakh'),
('khm', 'Khmer'),
('kin', 'Kinyarwanda'),
('kir', 'Kirghiz'),
('kor', 'Korean'),
('kur', 'Kurdish'),
('oci', 'Langue d \'Oc (post 1500)'),
('lao', 'Lao'),
('lat', 'Latin'),
('lav', 'Latvian'),
('lin', 'Lingala'),
('lit', 'Lithuanian'),
('mlg', 'Malagasy'),
('mlt', 'Maltese'),
('mar', 'Marathi'),
('mol', 'Moldavian'),
('mon', 'Mongolian'),
('nau', 'Nauru'),
('nep', 'Nepali'),
('nor', 'Norwegian'),
('ori', 'Oriya'),
('orm', 'Oromo'),
('pan', 'Panjabi'),
('pol', 'Polish'),
('por', 'Portuguese'),
('pus', 'Pushto'),
('que', 'Quechua'),
('roh', 'Rhaeto-Romance'),
('run', 'Rundi'),
('rus', 'Russian'),
('smo', 'Samoan'),
('sag', 'Sango'),
('san', 'Sanskrit'),
('scr', 'Serbo-Croatian'),
('sna', 'Shona'),
('snd', 'Sindhi'),
('sin', 'Singhalese'),
('ssw', 'Siswant'),
('slv', 'Slovenian'),
('som', 'Somali'),
('sot', 'Sotho'),
('spa', 'Spanish'),
('sun', 'Sudanese'),
('swa', 'Swahili'),
('tgl', 'Tagalog'),
('tgk', 'Tajik'),
('tam', 'Tamil'),
('tat', 'Tatar'),
('tel', 'Telugu'),
('tha', 'Thai'),
('tir', 'Tigrinya'),
('tog', 'Tonga (Nyasa)'),
('tso', 'Tsonga'),
('tsn', 'Tswana'),
('tur', 'Turkish'),
('tuk', 'Turkmen'),
('twi', 'Twi'),
('uig', 'Uighur'),
('ukr', 'Ukrainian'),
('urd', 'Urdu'),
('uzb', 'Uzbek'),
('vie', 'Vietnamese'),
('vol', 'Volapük'),
('wol', 'Wolof'),
('xho', 'Xhosa'),
('yid', 'Yiddish'),
('yor', 'Yoruba'),
('zha', 'Zhuang'),
('zul', 'Zulu'),
)
CHARSETS = (('', 'None/Unknown'),
('UTF-8', 'UTF-8/Unicode'),
('ISO-8859-1', 'Latin1/ISO-8859-1'),
('ISO-8859-2', 'Latin2/ISO-8859-2'),
('ISO-8859-3', 'Latin3/ISO-8859-3'),
('ISO-8859-4', 'Latin4/ISO-8859-4'),
('ISO-8859-5', 'Latin5/ISO-8859-5'),
('ISO-8859-6', 'Latin6/ISO-8859-6'),
('ISO-8859-7', 'Latin7/ISO-8859-7'),
('ISO-8859-8', 'Latin8/ISO-8859-8'),
('ISO-8859-9', 'Latin9/ISO-8859-9'),
('ISO-8859-10', 'Latin10/ISO-8859-10'),
('ISO-8859-13', 'Latin13/ISO-8859-13'),
('ISO-8859-14', 'Latin14/ISO-8859-14'),
            ('ISO-8859-15', 'Latin15/ISO-8859-15'),
('Big5', 'BIG5'),
('EUC-JP', 'EUC-JP'),
('EUC-KR', 'EUC-KR'),
('GBK', 'GBK'),
('GB18030', 'GB18030'),
('Shift_JIS', 'Shift_JIS'),
('KOI8-R', 'KOI8-R'),
('KOI8-U', 'KOI8-U'),
('cp874', 'Windows CP874'),
('windows-1250', 'Windows CP1250'),
('windows-1251', 'Windows CP1251'),
('windows-1252', 'Windows CP1252'),
('windows-1253', 'Windows CP1253'),
('windows-1254', 'Windows CP1254'),
('windows-1255', 'Windows CP1255'),
('windows-1256', 'Windows CP1256'),
('windows-1257', 'Windows CP1257'),
('windows-1258', 'Windows CP1258'))
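# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Each constant is a sequence of (stored_value, human_label) pairs, the shape
# Django expects for a field's `choices` argument; outside the ORM a dict view
# gives quick label lookups. The assertions below are illustrations only.
COUNTRY_LABELS = dict(COUNTRIES)
assert COUNTRY_LABELS['DEU'] == 'Germany'
assert dict(CHARSETS)['UTF-8'] == 'UTF-8/Unicode'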
|
tomkralidis/geonode
|
geonode/base/enumerations.py
|
Python
|
gpl-3.0
| 13,778
|
[
"BWA"
] |
798d1a0c46ddc227942f39dce7d8a90b6d7bc495b8b806b19d83387504ad4fae
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# createre - creation of runtime environments
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Creation of runtime environments"""
import os
import base64
import tempfile
import shared.returnvalues as returnvalues
from shared.defaults import max_software_entries, max_environment_entries
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.handlers import correct_handler
from shared.init import initialize_main_variables
from shared.refunctions import create_runtimeenv
from shared.validstring import valid_dir_input
def signature():
"""Signature of the main function"""
defaults = {
're_name': REJECT_UNSET,
'redescription': ['Not available'],
'testprocedure': [''],
'software': [],
'environment': [],
'verifystdout': [''],
'verifystderr': [''],
'verifystatus': [''],
}
return ['text', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
defaults = signature()[1]
output_objects.append({'object_type': 'header', 'text'
: 'Create runtime environment'})
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
if not correct_handler('POST'):
output_objects.append(
{'object_type': 'error_text', 'text'
: 'Only accepting POST requests to prevent unintended updates'})
return (output_objects, returnvalues.CLIENT_ERROR)
    re_name = accepted['re_name'][-1].strip().upper()
redescription = accepted['redescription'][-1].strip()
testprocedure = accepted['testprocedure'][-1].strip()
software = [i.strip() for i in accepted['software']]
environment = [i.strip() for i in accepted['environment']]
verifystdout = accepted['verifystdout'][-1].strip()
verifystderr = accepted['verifystderr'][-1].strip()
verifystatus = accepted['verifystatus'][-1].strip()
if not valid_dir_input(configuration.re_home, re_name):
logger.warning(
"possible illegal directory traversal attempt re_name '%s'"
% re_name)
output_objects.append({'object_type': 'error_text', 'text'
: 'Illegal runtime environment name: "%s"'
% re_name})
return (output_objects, returnvalues.CLIENT_ERROR)
software_entries = len(software)
if software_entries > max_software_entries:
output_objects.append({'object_type': 'error_text', 'text'
: 'Too many software entries (%s), max %s'
% (software_entries,
max_software_entries)})
return (output_objects, returnvalues.CLIENT_ERROR)
environment_entries = len(environment)
if environment_entries > max_environment_entries:
output_objects.append({'object_type': 'error_text', 'text'
: 'Too many environment entries (%s), max %s'
% (environment_entries,
max_environment_entries)})
return (output_objects, returnvalues.CLIENT_ERROR)
# create file to be parsed - force single line description
content = """::RENAME::
%s
::DESCRIPTION::
%s
""" % (re_name,
redescription.replace('\n', '<br />'))
if testprocedure:
verify_specified = []
if verifystdout:
content += '''::VERIFYSTDOUT::
%s
''' % verifystdout
verify_specified.append('verify_runtime_env_%s.stdout'
% re_name)
if verifystderr:
content += '''::VERIFYSTDERR::
%s
''' % verifystderr
verify_specified.append('verify_runtime_env_%s.stderr'
% re_name)
if verifystatus:
verify_specified.append('verify_runtime_env_%s.status'
% re_name)
content += '''::VERIFYSTATUS::
%s
''' % verifystatus
if verify_specified:
testprocedure += '''
::VERIFYFILES::
'''
for to_verify in verify_specified:
testprocedure += '%s\n' % to_verify
        # testprocedure must be encoded since it contains mRSL code and
        # keywords that may interfere with the runtime environment keywords.
        # In reality it is \n::KEYWORD::\n lines that may cause problems.
        # For now the string is simply base64 encoded.
content += '''::TESTPROCEDURE::
%s
'''\
% base64.encodestring(testprocedure).strip()
#print "testprocedure %s decoded %s" % \
# (testprocedure,
# base64.decodestring(base64.encodestring(testprocedure).strip()))
for software_ele in software:
content += '''::SOFTWARE::
%s
''' % software_ele.strip()
for environment_ele in environment:
content += '''::ENVIRONMENTVARIABLE::
%s
'''\
% environment_ele.strip()
try:
(filehandle, tmpfile) = tempfile.mkstemp(text=True)
os.write(filehandle, content)
os.close(filehandle)
except Exception, err:
output_objects.append({'object_type': 'error_text', 'text'
: 'Error preparing new runtime environment! %s'
% err})
return (output_objects, returnvalues.SYSTEM_ERROR)
(retval, retmsg) = create_runtimeenv(tmpfile, client_id,
configuration)
if not retval:
output_objects.append({'object_type': 'error_text', 'text'
: 'Error creating new runtime environment: %s'
% retmsg})
return (output_objects, returnvalues.SYSTEM_ERROR)
try:
os.remove(tmpfile)
except Exception:
pass
output_objects.append({'object_type': 'text', 'text'
                           : 'New runtime environment %s successfully created!'
% re_name})
output_objects.append({'object_type': 'link',
'destination': 'showre.py?re_name=%s' % re_name,
'class': 'viewlink',
'title': 'View your new runtime environment',
'text': 'View new %s runtime environment'
% re_name,
})
return (output_objects, returnvalues.OK)
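# --- Hedged illustration (not part of the original module) ---
# For re_name 'PYTHON-2.7' with one software and one environment entry, the
# temporary file handed to create_runtimeenv would look roughly like the
# following (values made up for illustration; a TESTPROCEDURE section would
# additionally be base64-encoded, as noted in the comments above):
#
#   ::RENAME::
#   PYTHON-2.7
#
#   ::DESCRIPTION::
#   CPython 2.7 interpreter
#
#   ::SOFTWARE::
#   python-2.7
#
#   ::ENVIRONMENTVARIABLE::
#   PYTHON=/usr/bin/python2.7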
|
heromod/migrid
|
mig/shared/functionality/createre.py
|
Python
|
gpl-2.0
| 7,612
|
[
"Brian"
] |
f892e807cb8a51e68d22458a12ffaea706e5d68c023a50e0442346c5269ec297
|
import unicodedata # For accents removing
import collections
import re # For checkMistakenNames function
def removeAccents(dataToTranslate):
"""Two function to remove accents, either one should work.
This is for testing which one runs faster. """
dataToTranslate = str(dataToTranslate)
return unicodedata.normalize('NFD',
dataToTranslate).encode('ASCII', 'ignore').decode("utf-8")
def removeAccents2(dataToTranslate):
    """Two functions to remove accents; either one should work.
    This is for testing which one runs faster. """
    dataToTranslate = str(dataToTranslate)
    return ''.join((c for c in
        unicodedata.normalize('NFD', dataToTranslate) if unicodedata.category(c) != 'Mn'))
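# --- Hedged micro-benchmark sketch (illustrative; not part of the original) ---
# The docstrings above say the two variants exist to compare speed; a small
# timeit harness along these lines would answer that (numbers will vary).
# The sample string uses unicode escapes so the file needs no coding header.
import timeit
def compareAccentRemovers(sample=u'J\u00fcrgen \u00c5ngstr\u00f6m', number=10000):
    for fn in (removeAccents, removeAccents2):
        t = timeit.timeit(lambda: fn(sample), number=number)
        print('%s: %.3fs for %d calls' % (fn.__name__, t, number))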
def nicknameMapping():
""" There are better ways to get this data. But I think for the purpose of this program,
it's best to just use this customized dictionary"""
D = {}
D['betty'] = 'elizabeth'
D['liz'] = 'elizabeth'
D['lizzy'] = 'elizabeth'
D['ana'] = 'anna'
D['ann'] = 'anna'
D['anne'] = 'anna'
D['annette'] = 'anna'
D['abigail'] = 'abby'
D['abbie'] = 'abby'
D['alexander'] = 'alexander'
D['curtis'] = 'curt'
D['alek'] = 'alex'
D['aleksandar'] = 'alex'
D['aleksander'] = 'alex'
D['aleksandra'] = 'alex'
D['alexandr'] = 'alex'
D['alexandra'] = 'alex'
D['alexandre'] = 'alex'
D['alexandru'] = 'alex'
D['alexei'] = 'alexis'
D['alan'] = 'allan'
D['alen'] = 'allen'
D['andrew'] = 'andy'
D['andrei'] = 'andre'
D['arthur'] = 'arthur'
D['benjamin'] = 'ben'
D['bernie'] = 'bernard'
D['bob'] = 'robert'
D['bobby'] = 'robert'
D['boby'] = 'robert'
D['rob'] = 'robert'
D['brian'] = 'brian'
D['bryan'] = 'brian'
D['chad'] = 'chardwick'
D['christopher'] = 'chris'
D['christoph'] = 'chris'
D['christophe'] = 'chris'
D['clifford'] = 'cliff'
D['cornelus'] = 'cornelius'
D['cornelis'] = 'cornelius'
D['david'] = 'dave'
D['daniel'] = 'dan'
D['danny'] = 'dan'
D['dennis'] = 'denise'
D['denis'] = 'denise'
D['dmitry'] = 'dmitri'
D['dimitrios'] = 'dmitri'
D['dimitris'] = 'dmitri'
D['douglas'] = 'doug'
D['ed'] = 'edward'
D['eddy'] = 'eddie'
D['erik'] = 'eric'
D['francoise'] = 'francois'
D['fredrick'] = 'fred'
D['fredrik'] = 'fred'
D['frdric'] = 'fred'
D['frdrik'] = 'fred'
D['frdrick'] = 'fred'
D['fredrik'] = 'fred'
D['frederick'] = 'fred'
D['frederic'] = 'fred'
D['gregory'] = 'greg'
D['gregg'] = 'greg'
D['gregor'] = 'greg'
D['gregorio'] = 'greg'
D['freddy'] = 'fred'
D['jeffrey'] = 'jeff'
D['josef'] = 'joseph'
D['josep'] = 'joseph'
D['joey'] = 'joe'
D['jonathon'] = 'johnathon'
D['joshua'] = 'josh'
D['kenneth'] = 'ken'
D['kenny'] = 'ken'
D['leonard'] = 'leo'
D['leonid'] = 'leo'
D['leonel'] = 'leo'
D['leonardo'] = 'leo'
D['louis'] = 'lou'
D['luis'] = 'louise'
D['luise'] = 'louise'
D['luiz'] = 'louise'
D['lukas'] = 'lucas'
D['lukasz'] = 'lucas'
D['luc'] = 'luke'
D['marc'] = 'mark'
D['matt'] = 'matthew'
D['marvin'] = 'marv'
D['max'] = 'maxwell'
D['michael'] = 'mike'
D['micheal'] = 'mike'
D['mitchell'] = 'mitch'
D['mitchel'] = 'mitch'
D['mohamed'] = 'mo'
D['mohammad'] = 'mo'
D['nathan'] = 'nate'
D['nathaniel'] = 'nate'
D['nicolas'] = 'nick'
D['nicholas'] = 'nick'
D['nic'] = 'nick'
D['patrick'] = 'pat'
D['patrik'] = 'pat'
D['peter'] = 'pete'
D['phillip'] = 'phil'
D['phillipe'] = 'phil'
D['phillippe'] = 'phil'
D['philip'] = 'phil'
D['rafael'] = 'rafi'
D['rafaele'] = 'rafi'
D['raphael'] = 'rafi'
D['raymond'] = 'ray'
D['rich'] = 'rick'
D['richard'] = 'rick'
D['dick'] = 'richard'
D['robby'] = 'rob'
D['robert'] = 'rob'
D['roberto'] = 'rob'
D['stanley'] = 'stan'
D['stephen'] = 'steve'
D['steven'] = 'steve'
D['samuel'] = 'sam'
D['sammy'] = 'sam'
D['terrance'] = 'terry'
D['terrence'] = 'terry'
D['terence'] = 'terry'
D['terri'] = 'terry'
D['theodore'] = 'ted'
D['tobias'] = 'tobias'
D['toby'] = 'tobias'
D['tobi'] = 'tobias'
D['thomas'] = 'tom'
D['tomas'] = 'tom'
D['timothy'] = 'tim'
D['vincent'] = 'vince'
D['vlad'] = 'vladimir'
D['walter'] = 'walt'
D['william'] = 'will'
D['catherine'] = 'cathy'
D['jennifer'] = 'jen'
D['jenifer'] = 'jen'
D['katherine'] = 'kathy'
D['kathleen'] = 'kathy'
D['kimberly'] = 'kim'
D['pamela'] = 'pam'
D['sara'] = 'sarah'
D['sophie'] = 'sophia'
D['susan'] = 'sue'
D['susana'] = 'susanna'
D['suzan'] = 'sue'
D['teresa'] = 'terry'
D['terese'] = 'terry'
D['valerie'] = 'val'
D['valery'] = 'val'
D['victoria'] = 'vicky'
D['vickie'] = 'vicky'
D['vicki'] = 'vicky'
return D
def checkMistakenNames(nameString):
"""There are some names in the database that is not of person names,
we don't consider those in this case"""
if ' or ' in nameString:
return True
if ' for ' in nameString:
return True
if ' and ' in nameString:
return True
if nameString[0] == 'a' and nameString[1] == ' ':
return True
if ' to ' in nameString:
return True
if ' in ' in nameString:
return True
if ' on ' in nameString:
return True
if ' of ' in nameString:
return True
return False
def removeNonalphanum(nameString):
"""This function deletes the mistakenly entered characters"""
nameString = nameString.replace("~", "")
nameString = nameString.replace("`", "")
nameString = nameString.replace("|", "")
nameString = nameString.replace("_", "")
nameString = nameString.replace("\\", "")
nameString = nameString.replace(" ", " ") #Multiple whitespace characters
nameString = nameString.replace(",", " ")
nameString = nameString.replace("'", "")
# This takes cares of asian names I think.
# Need to check if this generates false positives
nameString = nameString.replace("-", " ")
# nameString = nameString.replace("\. ", " ")
nameString = nameString.replace("\.", " ")
nameString = nameString.replace("\?", " ")
return nameString
def SpecialCharCases(nameString):
"""Some special cases handling here."""
nameString = nameString.replace(" iii", "")
nameString = nameString.replace(" ii", "")
nameString = nameString.replace(" iv", "")
nameString = nameString.replace(" mbbs", "") ## need to check this
return nameString
def generateNamesWithInitials(nameString):
""" A function that will generate initials given a normal name"""
    ## Perhaps we want a dot at the end? J. Smith as opposed to J Smith?
wordArray = nameString.split()
numWords = len(wordArray)
index = 0
stringWithInitials = ''
while (index < numWords-1):
stringWithInitials += wordArray[index][0]
stringWithInitials += '. '
index += 1
stringWithInitials += wordArray[numWords-1]
return stringWithInitials
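# Hedged illustration (not in the original module): every word except the
# last collapses to its dotted initial, e.g.
#   generateNamesWithInitials('john robert smith')  ->  'j. r. smith'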
def separateInitials(nameString):
    """ A function that separates a name like MK Martin into M K Martin.
    If every character in a chunk is in caps, we assume it to be multiple initials.
    ******Notice we should run this function before we are converting cases!!!"""
wordArray = nameString.split()
charList = []
resultString = ""
for word in wordArray:
if word == word.upper():
resultString += '. '.join(word)
## if all capitalized
if len(word) == len(nameString):
resultString += '.'
else:
# Need to append space if it's not the last
            if resultString and resultString[-1] != ' ':
resultString += '. '
resultString += word
resultString = resultString.strip() # Get rid of the white spaces
return resultString
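# Hedged illustration of the docstring's intent (not in the original module):
#   separateInitials('MK Martin')  ->  'M. K. Martin'
# i.e. an all-caps chunk is expanded into dotted initials before any
# case-folding happens elsewhere in the pipeline.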
def swapCharacterWithinNames(nameString, nameDict):
""" A function that replace the names with other characters based on a dictinary.
Here we could replace all the characters or just some.
Right now it's just replacing all.
We need to discuss this function, it's using a dictionary right now because
I'm pretty sure the runtime would be huge if we try all """
for key in nameDict:
tempString = nameString.replace(key, nameDict[key])
if nameString != tempString: nameString = tempString
return nameString
def cleanUpName(dataToTranslate):
cleanedName = removeAccents(dataToTranslate)
cleanedName = removeNonalphanum(cleanedName)
cleanedName = SpecialCharCases(cleanedName)
    # Right now I am not separating initials; I don't know if it's necessary
    # for the prefix scan implementation.
#cleanedName = separateInitials(cleanedName)
return cleanedName.lower()
def cleanUpTitle(title):
title = removeAccents(title)
title = removeNonalphanum(title)
return title
|
bchalala/cs145-duplicats-in-space
|
authorParserHelper.py
|
Python
|
gpl-3.0
| 8,217
|
[
"Brian"
] |
9ee7fe21508502e82cd3979ba39ecaf4a92f092720f06681544995e5d74b8b82
|