| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (1 class) | license (15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
from neuron import Neuron
import math
import random
sigm = lambda x: (1.0/(1+pow(math.e, -x)))
sigmp = lambda x: sigm(x)*(1.0-sigm(x))
error = lambda x, y: 0.5*pow(x-y, 2)
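# With the standard logistic sigm(x) = 1/(1 + e^-x), the derivative is
# sigm(x)*(1 - sigm(x)), which is exactly what sigmp computes; error(x, y)
# is the usual squared-error loss, whose derivative with respect to x is (x - y).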
def first_last_errors(errors):
sq = int(math.sqrt(len(errors)))
first_errors = sum(errors[:sq])/sq
last_errors = sum(errors[-sq:])/sq
return first_errors, last_errors
def decreasing_verdict(f, l):
v = "No"
if f > l:
v = "Yes"
return v
def report(errors):
f,l = first_last_errors(errors)
v = decreasing_verdict(f, l)
return "Early avg error: {0}, Late avg error: {1}, decreasing: {2}\n".format(f, l, v)
def test_1(steps):
weights = 3
print "Linear combination of weights {0}, {1} steps".format(weights, steps)
neuron = Neuron(weights, sigm, sigmp, error)
errors = []
for i in range(steps):
inputs = [random.random() for r in range(weights)]
target = 2*inputs[0] + 0.3*inputs[1] - 0.7*inputs[2]
neuron.learn_1(inputs, target)
errors.append(neuron.last_error)
print report(errors)
def test_2(steps):
weights = 1
print "Target itself {0}, {1} steps".format(weights, steps)
neuron = Neuron(weights, sigm, sigmp, error)
errors = []
for i in range(steps):
inputs = [random.random() for r in range(weights)]
target = inputs[0]
neuron.learn_1(inputs, target)
errors.append(neuron.last_error)
print report(errors)
def test_3(steps):
weights = 1
print "Target converse {0}, {1} steps".format(weights, steps)
neuron = Neuron(weights, sigm, sigmp, error)
errors = []
for i in range(steps):
inputs = [random.random() for r in range(weights)]
target = 1.0 - inputs[0]
neuron.learn_1(inputs, target)
errors.append(neuron.last_error)
print report(errors)
def test_4(steps):
weights = 40
print "Target max - min {0}, {1} steps".format(weights, steps)
neuron = Neuron(weights, sigm, sigmp, error)
errors = []
for i in range(steps):
inputs = [random.random() for r in range(weights)]
imax = max(inputs)
imin = min(inputs)
target = imax - imin
neuron.learn_1(inputs, target)
errors.append(neuron.last_error)
print report(errors)
def test_5(steps):
weights = 40
print "Target sqrt(avg) {0}, {1} steps".format(weights, steps)
neuron = Neuron(weights, sigm, sigmp, error)
errors = []
for i in range(steps):
inputs = [random.random() for r in range(weights)]
avg = sum(inputs)/len(inputs)
target = math.sqrt(avg)
neuron.learn_1(inputs, target)
errors.append(neuron.last_error)
print report(errors)
if __name__ == '__main__':
test_1(500)
test_2(500)
test_3(500)
test_4(500)
test_5(6524)
| rylans/nn-from-scratch | neuron_test.py | Python | apache-2.0 | 2,816 | ["NEURON"] | 008b49e3a16230ffd62aff811949b9b5c477a53a4eab1b63454b3e73bbff1b38 |
from mdtraj.testing import get_fn, eq
from mdtraj import element
import mdtraj as md
def test_element_0():
t = md.load(get_fn('bpti.pdb'))
a = t.top.atom(15)
H = element.Element.getBySymbol('H')
eq(a.element, element.hydrogen)
| casawa/mdtraj | mdtraj/formats/pdb/tests/test_element.py | Python | lgpl-2.1 | 247 | ["MDTraj"] | bf16db87613200030c4800096879f58a0e3f2958b29f0066d740338b06992909 |
#
# Copyright (c) 2017, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
from os.path import join
'''
Handles all the editing of R FFI header files from the GNUR include directory to the
FastR include directory.
'''
# variables in Rinternals.h that are Java Objects and so remapped to functions
r_internals_vars = ['R_GlobalEnv', 'R_BaseEnv', 'R_BaseNamespace', 'R_NamespaceRegistry']
def edinclude(args):
'''
edit GNU include files for FASTR
args[0] path to GNUR include directory
'''
ed_r_internals(args[0])
ed_r_interface(args[0])
ed_graphicsengine(args[0])
ed_rconfig(args[0])
use_internals_section = '''#ifdef FASTR
// packages defining USE_INTERNALS expect certain defs (e.g. isNull) to be there
#ifdef USE_RINTERNALS
#define USE_RINTERNALS_DEFS
#endif
#undef USE_RINTERNALS
#else
'''
sexp = '''#ifdef FASTR
typedef void *SEXP;
#define DATAPTR(x)\t\tR_DATAPTR(x)
void *(R_DATAPTR)(SEXP x);
#define IS_BYTES IS_BYTES
#define IS_LATIN1 IS_LATIN1
#define IS_ASCII IS_ASCII
#define IS_UTF8 IS_UTF8
#define ENC_KNOWN ENC_KNOWN
Rboolean IS_BYTES(SEXP x);
Rboolean IS_LATIN1(SEXP x);
Rboolean IS_ASCII(SEXP x);
Rboolean IS_UTF8(SEXP x);
Rboolean ENC_KNOWN(SEXP x);
#else
'''
use_internals_begin = '''#if defined (USE_RINTERNALS_DEFS) && (defined (USE_RINTERNALS) || defined (FASTR))
'''
use_internals_end = '''#endif
#ifdef USE_RINTERNALS
'''
def ed_r_internals(gnu_dir):
r_internals_h = join(gnu_dir, 'Rinternals.h')
with open(r_internals_h) as f:
lines = f.readlines()
use_rinternals_count = 0
with open('Rinternals.h', 'w') as f:
for line in lines:
if '== USE_RINTERNALS section' in line:
f.write(use_internals_section)
f.write(line)
f.write('#endif\n')
elif 'typedef struct SEXPREC *SEXP' in line:
f.write(sexp)
f.write(line)
f.write('#endif\n')
elif '#ifdef USE_RINTERNALS' in line:
if use_rinternals_count > 0:
f.write(use_internals_begin)
else:
f.write(line)
use_rinternals_count = 1
elif 'macro version of R_CheckStack' in line:
f.write(use_internals_end)
f.write(line)
elif 'LibExtern' in line:
var = is_internal_var(line)
if var:
rewrite_var(f, var, line)
else:
f.write(line)
else:
f.write(line)
def rewrite_var(f, var, line):
f.write('#ifdef FASTR\n')
f.write('LibExtern SEXP FASTR_{0}();\n'.format(var))
f.write('LibExtern SEXP {0};\n'.format(var))
f.write('#ifndef NO_FASTR_REDEFINE\n')
f.write('#define {0} FASTR_{0}()\n'.format(var))
f.write('#endif\n')
f.write('#else\n')
f.write(line)
f.write('#endif\n')
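# For example, a GNU-R declaration such as "LibExtern SEXP R_GlobalEnv;" would be
# rewritten by rewrite_var into (sketch):
#   #ifdef FASTR
#   LibExtern SEXP FASTR_R_GlobalEnv();
#   LibExtern SEXP R_GlobalEnv;
#   #ifndef NO_FASTR_REDEFINE
#   #define R_GlobalEnv FASTR_R_GlobalEnv()
#   #endif
#   #else
#   LibExtern SEXP R_GlobalEnv;
#   #endif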
def is_internal_var(line):
for var in r_internals_vars:
varsemi = var + ';'
if varsemi in line:
return var
return None
context_defs = '''#ifdef FASTR
typedef void *CTXT;
typedef void *SEXP;
extern CTXT FASTR_GlobalContext();
#define R_GlobalContext FASTR_GlobalContext()
extern CTXT R_getGlobalFunctionContext();
extern CTXT R_getParentFunctionContext(CTXT);
extern SEXP R_getContextEnv(CTXT);
extern SEXP R_getContextFun(CTXT);
extern SEXP R_getContextCall(CTXT);
extern SEXP R_getContextSrcRef(CTXT);
extern int R_insideBrowser();
extern int R_isGlobal(CTXT);
extern int R_isEqual(void*, void*);
#else
'''
interactive_rewrite = '''
#include <R_ext/RStartup.h>
#ifdef FASTR
extern Rboolean FASTR_R_Interactive();
extern Rboolean R_Interactive;
#ifndef NO_FASTR_REDEFINE
#define R_Interactive FASTR_R_Interactive()
#endif
#else
'''
def ed_r_interface(gnu_dir):
r_interface_h = join(gnu_dir, 'Rinterface.h')
with open(r_interface_h) as f:
lines = f.readlines()
with open('Rinterface.h', 'w') as f:
for line in lines:
if 'R_GlobalContext' in line:
f.write(context_defs)
f.write(line)
f.write('#endif\n')
elif 'R_Interactive' in line:
f.write(interactive_rewrite)
f.write(line)
f.write('#endif\n')
else:
f.write(line)
def ed_graphicsengine(gnu_dir):
graphicsengine_h = join(gnu_dir, 'R_ext', 'GraphicsEngine.h')
with open(graphicsengine_h) as f:
lines = f.readlines()
with open(join('R_ext', 'GraphicsEngine.h'), 'w') as f:
for line in lines:
if 'MAX_GRAPHICS_SYSTEMS' in line:
f.write(line.replace('24', '256'))
else:
f.write(line)
def ed_rconfig(gnu_dir):
'''
GNU R is built with ENABLE_NLS (internationalized strings) but FastR
does not do that in native code, so we disable it.
'''
rconfig_h = join(gnu_dir, 'Rconfig.h')
with open(rconfig_h) as f:
lines = f.readlines()
with open(join('Rconfig.h'), 'w') as f:
for line in lines:
if 'ENABLE_NLS' in line:
continue
else:
f.write(line)
| akunft/fastr | mx.fastr/mx_fastr_edinclude.py | Python | gpl-2.0 | 6,178 | ["VisIt"] | 0577b2d4fea81c5be814b580fc5a763090076dc0f749932552faf0bc035c2b01 |
#!/usr/bin/python
#=============================================================================================
# example files for reading in MD simulation files and performing
# statistical analyses according to manuscript "Simple tests for
# validity when sampling from thermodynamic ensembles", Michael
# R. Shirts.
#
# COPYRIGHT NOTICE
#
# Written by Michael R. Shirts <mrshirts@gmail.com>.
#
# Copyright (c) 2012 The University of Virginia. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# =============================================================================================
#
#===================================================================================================
# IMPORTS
#===================================================================================================
import pdb
import numpy
import timeseries
from checkensemble import *
import optparse, sys
from optparse import OptionParser
onlyE = ['potential', 'kinetic', 'total']
requireV = ['enthalpy', 'volume', 'jointEV']
requireN = ['helmholtz', 'number', 'jointEN']
alltypes = onlyE + requireV + requireN
def read_flatfile(lines,type,N_max):
# assumes kJ/mol energies, nm^3 volumes
# allocate space
U_n = numpy.zeros([N_max], dtype=numpy.float64) # U_n[k,n] is the energy of the sample n
V_n = numpy.zeros([N_max], dtype=numpy.float64) # V_n[k,n] is the volume of the sample n
N_n = numpy.zeros([N_max], dtype=numpy.float64) # N_n[k,n] is the number of particles of the sample n
N = 0
# we assume energy is first, then volume, then n
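# Illustrative flat-file layout for a two-column test such as 'enthalpy' or 'jointEV'
# (values hypothetical), one sample per line, '#' lines ignored:
#   # E (kJ/mol)   V (nm^3)
#   -12345.6       2.513
#   -12340.1       2.507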
for line in lines:
if (line[0] != '#'): # in flat file format, anything that starts with a hash is an ignored comment
elements = line.split()
numcol = len(elements)
if (numcol == 0):
print "Error: No data for data point %d" % (N)
sys.exit()
elif (numcol == 1):
if (type in onlyE):
U_n[N] = float(elements[0])
elif (type == 'volume'):
V_n[N] = float(elements[0])
elif (type == 'number'):
N_n[N] = float(elements[0])
else:
print "Error: asking for test requiring multiple variables (%s) but only provided one column of data" % (type)
sys.exit()
elif (numcol == 2):
if type in requireV and type != 'volume':
U_n[N] = float(elements[0])
V_n[N] = float(elements[1])
elif type in requireN and type != 'number':
U_n[N] = float(elements[0])
N_n[N] = float(elements[1])
else:
print "Error: asking for test (%s) incompatible with two columns of data" % (type)
elif (numcol > 2):
print "Error: there is no test that required the provided %d columns of data" % (numcol)
sys.exit()
N += 1
return U_n,V_n,N_n,N
def read_gromacs(lines,type,N_max):
# allocate space
U_n = numpy.zeros([N_max], dtype=numpy.float64) # U_n[k,n] is the energy of the sample n
V_n = numpy.zeros([N_max], dtype=numpy.float64) # V_n[k,n] is the volume of the sample n
N = 0
ematch = False
vmatch = False
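# The loop below locates columns from GROMACS .xvg legend lines, e.g. (illustrative):
#   @ s0 legend "Potential"
#   @ s2 legend "Kinetic En."
#   @ s4 legend "Total Energy"
#   @ s7 legend "Volume"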
for line in lines:
# Split line into elements.
if (line[0:3] == '@ s'):
elements = line.split()
whichcol = int((elements[1])[1:])+1 # figure out which column it is
if (type == 'potential'):
if (elements[3] == "\"Potential\""):
ecol = whichcol
ematch = True
if (type == 'total') or (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
if (elements[3] == "\"Total"):
comp = elements[3] + ' ' + elements[4]
if (comp == "\"Total Energy\""):
ecol = whichcol
ematch = True
if (type == 'kinetic'):
if (elements[3] == "\"Kinetic"):
comp = elements[3] + ' ' + elements[4]
if (comp == "\"Kinetic En.\""):
ecol = whichcol
ematch = True
if (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
if (elements[3] == "\"Volume\""):
vcol = whichcol
vmatch = True
if ((line[0] != '#') and (line[0] != '@')):
elements = line.split()
# what is the time of the sample
if (type != 'volume'):
energy = float(elements[ecol])
U_n[N] = energy
if (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
volume = float(elements[vcol])
V_n[N] = volume
N += 1
return U_n,V_n,N
def read_charmm(lines,type,N_max):
# allocate space
U_n = numpy.zeros([N_max], dtype=numpy.float64) # U_n[k,n] is the energy of the sample n
V_n = numpy.zeros([N_max], dtype=numpy.float64) # V_n[k,n] is the volume of the sample n
N = 0
ematch = False
vmatch = False
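# The parser expects a 'DYNA DYN:' header naming the energy columns (ENER, TOTK, TOTE),
# 'DYNA>' rows carrying the matching values, and, when volume is needed, a
# 'DYNA PRESS:' header with a VOLU column followed by 'DYNA PRESS>' rows.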
for line in lines:
elements = line.split()
if (line[0:4] == 'DYNA'):
if (line[0:8] == 'DYNA DYN'):
if (line[8:9] == ':'):
for i,e in enumerate(elements):
if (type == 'kinetic'):
if (e[0:4] == 'TOTK'):
ecol = i-1
ematch = True
if (type == 'potential'):
if (e[0:4] == 'ENER'):
ecol = i-1
ematch = True
if (type == 'total') or (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
if (e[0:4] == 'TOTE'):
ecol = i-1
ematch = True
elif (line[0:5] == 'DYNA>'):
U_n[N] = float(elements[ecol])
if (type != 'volume'):
N += 1 # we count here unless volume is the only variable
if (line[0:10] == 'DYNA PRESS'):
if (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
if (line[10:11] == ':'):
for i,e in enumerate(elements):
if (e[0:4] == 'VOLU'):
vcol = i
vmatch = True
elif (line[10:11] == '>'):
V_n[N] = float(elements[vcol])
if (type == 'volume'):
N += 1 # we only count here when volume is the only variable
return U_n,V_n,N
def read_desmond(lines,type,N_max):
# reads desmond .ene files
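# Column indices are taken from the Desmond header line of the form
# "# 0:time (ps) 1:E_p 2:E_k 3:E ... n:V" (ordering illustrative); each
# "index:name" token is split below to find the E_p/E_k/E/V columns.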
# allocate space
U_n = numpy.zeros([N_max], dtype=numpy.float64) # U_n[k,n] is the energy of the sample n
V_n = numpy.zeros([N_max], dtype=numpy.float64) # V_n[k,n] is the volume of the sample n
N = 0
ematch = False
vmatch = False
for line in lines:
# Split line into elements.
if (line[0:13] == '# 0:time (ps)'): # this line tells us what column is what
elements = line.split()
for e in (elements):
if (e[0] != '(') and e[0] != '#':
(num,val) = e.split(':')
if (type == 'potential'):
if val == 'E_p':
ecol = int(num)
ematch = True
if (type == 'total') or (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
if val == 'E':
ecol = int(num)
ematch = True
if (type == 'kinetic'):
if val == 'E_k':
ecol = int(num)
ematch = True
if (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
if val == 'V':
vcol = int(num)
vmatch = True
if ((line[0] != '#') and (line != '\n')):
elements = line.split()
# what is the time of the sample
if (type != 'volume'):
energy = float(elements[ecol])
U_n[N] = energy
if (type == 'volume') or (type == 'enthalpy') or (type == 'jointEV'):
volume = float(elements[vcol])
V_n[N] = volume
N += 1
return U_n,V_n,N
def getefficiency(N_k,U_kn,V_kn,N_kn,type):
K = len(N_k)
g = numpy.ones(K)
ge = numpy.ones(K);
gv = numpy.ones(K);
gn = numpy.ones(K);
if (type != 'volume') and (type != 'number'):
for k in range(K):
ge[k] = timeseries.statisticalInefficiency(U_kn[k,0:N_k[k]],fast=False)
print "Calculating ["
for k in range(K):
print " %.3f " % (ge[k])
print "] as the statistical inefficiencies of the energy"
if type in requireV:
for k in range(K):
gv[k] = timeseries.statisticalInefficiency(V_kn[k,0:N_k[k]],fast=False)
print "Calculating ["
for k in range(K):
print " %.3f " % (gv[k])
print "] as the statistical inefficiencies of the volume"
if type in requireN:
for k in range(K):
gn[k] = timeseries.statisticalInefficiency(N_kn[k,0:N_k[k]],fast=False)
print "Calculating ["
for k in range(K):
print " %.3f " % (gn[k])
print "] as the statistical inefficiencies of the particle number"
for k in range(K):
g[k] = numpy.max([ge[k],gv[k],gn[k]])
print "Using ["
for k in range(K):
print " %.3f " % (g[k])
print "] as the statistical inefficiencies"
return g
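# The per-state inefficiencies returned above are meant to be passed as g to
# subsample() below, which thins each series down to roughly independent samples.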
def subsample(N_k,U_kn,V_kn,N_kn,g,type):
K = len(N_k)
N_k_sampled = numpy.zeros(K, dtype=numpy.int)
tempspace = numpy.zeros(numpy.max(N_k))
for k in range(K):
if (type != 'volume') and (type != 'number'):
indices = timeseries.subsampleCorrelatedData(U_kn[k,0:N_k[k]],g[k])
tempspace = U_kn[k,indices].copy()
N_k_sampled[k] = numpy.size(indices)
U_kn[k,0:N_k_sampled[k]] = tempspace[0:N_k_sampled[k]]
if (type in requireV):
indices = timeseries.subsampleCorrelatedData(V_kn[k,0:N_k[k]],g[k])
tempspace = V_kn[k,indices].copy()
N_k_sampled[k] = numpy.size(indices)
V_kn[k,0:N_k_sampled[k]] = tempspace[0:N_k_sampled[k]]
if (type in requireN):
indices = timeseries.subsampleCorrelatedData(N_kn[k,0:N_k[k]],g[k])
tempspace = N_kn[k,indices].copy()
N_k_sampled[k] = numpy.size(indices)
N_kn[k,0:N_k_sampled[k]] = tempspace[0:N_k_sampled[k]]
print "data has been subsampled using the statistical inefficiencies"
g[k] = 1.0
N_k[k] = N_k_sampled[k]
| shirtsgroup/checkensemble | checkensemble/readmdfiles.py | Python | gpl-2.0 | 12,189 | ["Desmond"] | 4b180f6f83252b029641b1b96e71684bfac3e5cb45055e3b4318aa517f5ec446 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
This module implements equivalents of the basic ComputedEntry objects, which
is the basic entity that can be used to perform many analyses. ComputedEntries
contain calculated information, typically from VASP or other electronic
structure codes. For example, ComputedEntries can be used as inputs for phase
diagram analysis.
"""
__author__ = "Shyue Ping Ong, Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Apr 30, 2012"
import json
from monty.json import MontyEncoder, MontyDecoder
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.core.composition import Composition
from pymatgen.serializers.json_coders import PMGSONable
class ComputedEntry(PDEntry, PMGSONable):
"""
A lightweight ComputedEntry object containing key computed data
for many purposes. Extends a PDEntry so that it can be used for phase
diagram generation. The difference between a ComputedEntry and a standard
PDEntry is that it includes additional parameters like a correction and
run_parameters.
"""
def __init__(self, composition, energy, correction=0.0, parameters=None,
data=None, entry_id=None, attribute=None):
"""
Initializes a ComputedEntry.
Args:
composition (Composition): Composition of the entry. For
flexibility, this can take the form of all the typical input
taken by a Composition, including a {symbol: amt} dict,
a string formula, and others.
energy (float): Energy of the entry. Usually the final calculated
energy from VASP or other electronic structure codes.
correction (float): A correction to be applied to the energy.
This is used to modify the energy for certain analyses.
Defaults to 0.0.
parameters (dict): An optional dict of parameters associated with
the entry. Defaults to None.
data (dict): An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id (obj): An optional id to uniquely identify the entry.
attribute: Optional attribute of the entry. This can be used to
specify that the entry is a newly found compound, or to specify
a particular label for the entry, or else ... Used for further
analysis and plotting purposes. An attribute can be anything
but must be PMGSONable.
"""
self.uncorrected_energy = energy
self.composition = Composition(composition)
self.correction = correction
self.parameters = parameters if parameters else {}
self.data = data if data else {}
self.entry_id = entry_id
self.name = self.composition.reduced_formula
self.attribute = attribute
@property
def energy(self):
"""
Returns the *corrected* energy of the entry.
"""
return self.uncorrected_energy + self.correction
def __repr__(self):
output = ["ComputedEntry {}".format(self.composition.formula),
"Energy = {:.4f}".format(self.uncorrected_energy),
"Correction = {:.4f}".format(self.correction), "Parameters:"]
for k, v in self.parameters.items():
output.append("{} = {}".format(k, v))
output.append("Data:")
for k, v in self.data.items():
output.append("{} = {}".format(k, v))
return "\n".join(output)
def __str__(self):
return self.__repr__()
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
return cls(d["composition"], d["energy"], d["correction"],
dec.process_decoded(d.get("parameters", {})),
dec.process_decoded(d.get("data", {})),
entry_id=d.get("entry_id", None),
attribute=d["attribute"] if "attribute" in d else None)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"energy": self.uncorrected_energy,
"composition": self.composition.as_dict(),
"correction": self.correction,
"parameters": json.loads(json.dumps(self.parameters,
cls=MontyEncoder)),
"data": json.loads(json.dumps(self.data, cls=MontyEncoder)),
"entry_id": self.entry_id,
"attribute": self.attribute}
class ComputedStructureEntry(ComputedEntry):
"""
A heavier version of ComputedEntry which contains a structure as well. The
structure is needed for some analyses.
"""
def __init__(self, structure, energy, correction=0.0, parameters=None,
data=None, entry_id=None):
"""
Initializes a ComputedStructureEntry.
Args:
structure (Structure): The actual structure of an entry.
energy (float): Energy of the entry. Usually the final calculated
energy from VASP or other electronic structure codes.
correction (float): A correction to be applied to the energy.
This is used to modify the energy for certain analyses.
Defaults to 0.0.
parameters (dict): An optional dict of parameters associated with
the entry. Defaults to None.
data (dict): An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id (obj): An optional id to uniquely identify the entry.
"""
super(ComputedStructureEntry, self).__init__(
structure.composition, energy, correction=correction,
parameters=parameters, data=data, entry_id=entry_id)
self.structure = structure
def __repr__(self):
output = ["ComputedStructureEntry {}".format(self.composition.formula),
"Energy = {:.4f}".format(self.uncorrected_energy),
"Correction = {:.4f}".format(self.correction), "Parameters:"]
for k, v in self.parameters.items():
output.append("{} = {}".format(k, v))
output.append("Data:")
for k, v in self.data.items():
output.append("{} = {}".format(k, v))
return "\n".join(output)
def __str__(self):
return self.__repr__()
def as_dict(self):
d = super(ComputedStructureEntry, self).as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["structure"] = self.structure.as_dict()
return d
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
return cls(dec.process_decoded(d["structure"]),
d["energy"], d["correction"],
dec.process_decoded(d.get("parameters", {})),
dec.process_decoded(d.get("data", {})),
entry_id=d.get("entry_id", None))
| sonium0/pymatgen | pymatgen/entries/computed_entries.py | Python | mit | 7,343 | ["VASP", "pymatgen"] | 8148ad0780cca96993562489ee0541310254e3f3fa41f2af58f7a5d3f6b765a5 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EphysPropSummary'
db.create_table('neuroelectro_ephyspropsummary', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ephys_prop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.EphysProp'])),
('num_nemds', self.gf('django.db.models.fields.IntegerField')(null=True)),
('num_neurons', self.gf('django.db.models.fields.IntegerField')(null=True)),
('num_articles', self.gf('django.db.models.fields.IntegerField')(null=True)),
('date_mod', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('neuroelectro', ['EphysPropSummary'])
# Adding model 'NeuronSummary'
db.create_table('neuroelectro_neuronsummary', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('neuron', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.Neuron'])),
('num_nemds', self.gf('django.db.models.fields.IntegerField')(null=True)),
('num_articles', self.gf('django.db.models.fields.IntegerField')(null=True)),
('date_mod', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('neuroelectro', ['NeuronSummary'])
# Adding model 'ArticleSummary'
db.create_table('neuroelectro_articlesummary', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.Article'])),
('num_nemds', self.gf('django.db.models.fields.IntegerField')(null=True)),
('num_neurons', self.gf('django.db.models.fields.IntegerField')(null=True)),
('author_list_str', self.gf('django.db.models.fields.CharField')(max_length=500, null=True)),
('date_mod', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('neuroelectro', ['ArticleSummary'])
def backwards(self, orm):
# Deleting model 'EphysPropSummary'
db.delete_table('neuroelectro_ephyspropsummary')
# Deleting model 'NeuronSummary'
db.delete_table('neuroelectro_neuronsummary')
# Deleting model 'ArticleSummary'
db.delete_table('neuroelectro_articlesummary')
models = {
'neuroelectro.article': {
'Meta': {'object_name': 'Article'},
'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Author']", 'null': 'True', 'symmetrical': 'False'}),
'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}),
'pmid': ('django.db.models.fields.IntegerField', [], {}),
'pub_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}),
'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.articlefulltext': {
'Meta': {'object_name': 'ArticleFullText'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'neuroelectro.articlesummary': {
'Meta': {'object_name': 'ArticleSummary'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'author_list_str': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_nemds': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.author': {
'Meta': {'object_name': 'Author'},
'first': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'last': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.datatable': {
'Meta': {'object_name': 'DataTable'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}),
'ephys_props': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.EphysProp']", 'null': 'True', 'through': "orm['neuroelectro.EphysConceptMap']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'needs_expert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}),
'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'})
},
'neuroelectro.datatabletag': {
'Meta': {'object_name': 'DataTableTag'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"})
},
'neuroelectro.ephysconceptmap': {
'Meta': {'object_name': 'EphysConceptMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'ephys_prop_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysPropSyn']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'neuroelectro.ephysprop': {
'Meta': {'object_name': 'EphysProp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
},
'neuroelectro.ephyspropsummary': {
'Meta': {'object_name': 'EphysPropSummary'},
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nemds': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.ephyspropsyn': {
'Meta': {'object_name': 'EphysPropSyn'},
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'neuroelectro.journal': {
'Meta': {'object_name': 'Journal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.meshterm': {
'Meta': {'object_name': 'MeshTerm'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'defining_articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Article']", 'null': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.neuronarticlemap': {
'Meta': {'object_name': 'NeuronArticleMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'neuron_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True'}),
'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.neuronconceptmap': {
'Meta': {'object_name': 'NeuronConceptMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'neuron_syn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True'}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'neuroelectro.neuronephysdatamap': {
'Meta': {'object_name': 'NeuronEphysDataMap'},
'added_by': ('django.db.models.fields.CharField', [], {'default': "'robot'", 'max_length': '20'}),
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}),
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'ephys_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysConceptMap']"}),
'err': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'neuron_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronConceptMap']"}),
'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.neuronephyslink': {
'Meta': {'object_name': 'NeuronEphysLink'},
'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']"}),
'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_reps': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {}),
'val_err': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'neuroelectro.neuronsummary': {
'Meta': {'object_name': 'NeuronSummary'},
'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}),
'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_nemds': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}),
'entrezid': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}),
'val': ('django.db.models.fields.FloatField', [], {})
},
'neuroelectro.species': {
'Meta': {'object_name': 'Species'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specie': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.substance': {
'Meta': {'object_name': 'Substance'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'neuroelectro.superprotein': {
'Meta': {'object_name': 'SuperProtein'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.unit': {
'Meta': {'object_name': 'Unit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['neuroelectro']
| neuroelectro/neuroelectro_org | neuroelectro/south_migrations/0029_auto__add_ephyspropsummary__add_neuronsummary__add_articlesummary.py | Python | gpl-2.0 | 21,123 | ["NEURON"] | 2ea19aef9386a528218807decd0e2503f66becc2f132d0deed4a9a76008e6fce |
#!/usr/bin/env python
from __future__ import print_function
from jinja2 import Environment, FileSystemLoader
import argparse
import urllib2
import csv
import os
import shutil
import sys
import os.path
import subprocess
UBUNTU_RELEASE_URL = 'http://cloud-images.ubuntu.com/query/trusty/server/released.current.txt' # NOQA
UBUNTU_RELEASE_FIELD_NAMES = ['version', 'version_type', 'release_status',
'date', 'storage', 'arch', 'region', 'id',
'kernel', 'unknown_col', 'virtualization_type']
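# released.current.txt is a tab-separated table whose columns map onto
# UBUNTU_RELEASE_FIELD_NAMES; get_latest_ami() keeps the amd64 / ebs-ssd / hvm
# row for the requested region and returns its AMI id.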
def get_latest_ami(region='us-east-1'):
response = urllib2.urlopen(UBUNTU_RELEASE_URL).readlines()
reader = csv.DictReader(response, fieldnames=UBUNTU_RELEASE_FIELD_NAMES,
delimiter='\t')
def ami_filter(ami):
"""Helper function to filter AMIs"""
return (ami['region'] == region and
ami['arch'] == 'amd64' and
ami['storage'] == 'ebs-ssd' and
ami['virtualization_type'] == 'hvm')
return [row for row in reader if ami_filter(row)][0]['id']
def get_project_root():
return os.path.dirname(os.path.abspath(__file__))
def update_ansible_roles(ansible_dir):
ansible_command = ['ansible-galaxy',
'install',
'-f',
'-r', 'roles.yml',
'-p', os.path.join(ansible_dir, 'roles')]
print(subprocess.check_output(ansible_command, cwd=ansible_dir),
file=sys.stderr)
def purge_role_examples(ansible_dir):
ansible_roles_path = os.path.join(ansible_dir, 'roles')
for role_path in os.listdir(ansible_roles_path):
examples_path = os.path.join(ansible_roles_path, role_path, 'examples')
if role_path.startswith('azavea') and os.path.isdir(examples_path):
shutil.rmtree(examples_path)
def render_packer_config(tmpl, **kwargs):
packer_dir = os.path.join(get_project_root(), 'packer')
jinja_env = Environment(loader=FileSystemLoader(packer_dir))
return jinja_env.get_template('template.tmpl').render(**kwargs)
def main():
common_parser = argparse.ArgumentParser(add_help=False)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='Deep Learning Commands')
create_ami = subparsers.add_parser('packer-config', help='Create Packer '
'configuration',
parents=[common_parser])
create_ami.add_argument('--instance-type', type=str,
choices=['g2.2xlarge', 'g2.8xlarge'],
default='g2.2xlarge', help='Packer instance type')
create_ami.add_argument('--source-ami', type=str, default=get_latest_ami(),
help='Base used to create Packer instance')
create_ami.add_argument('--spot-price', type=str, default="0.50",
help='Spot price for Packer instance')
create_ami.add_argument('--vpc-id', type=str, required=True,
help='VPC ID of VPC to launch Packer instance')
create_ami.add_argument('--subnet-id', type=str, required=True,
help='Subnet ID within VPC to launch Packer '
'instance')
create_ami.add_argument('--ansible-version', type=str, default="2.0.0.2",
help='Ansible version used to provision instance')
create_ami.set_defaults(func=create_ami)
args = parser.parse_args()
ansible_dir = os.path.join(get_project_root(), 'ansible')
update_ansible_roles(ansible_dir)
purge_role_examples(ansible_dir)
print(render_packer_config('template.tmpl', **vars(args)))
if __name__ == '__main__':
main()
| azavea/docker-deep-learning | deployment/driver.py | Python | apache-2.0 | 3,798 | ["Galaxy"] | 3afaefb6f039b94de2807f54934e75d01ebb3d288a5d298b42e16531cbce960a |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'DataDomainIndex.groups'
db.delete_column(u'profiles_datadomainindex', 'groups_id')
# Adding field 'DataDomainIndex.group'
db.add_column(u'profiles_datadomainindex', 'group', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['profiles.Group']), keep_default=False)
def backwards(self, orm):
# Adding field 'DataDomainIndex.groups'
db.add_column(u'profiles_datadomainindex', 'groups', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['profiles.Group']), keep_default=False)
# Deleting field 'DataDomainIndex.group'
db.delete_column(u'profiles_datadomainindex', 'group_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 11, 4, 51, 765794)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 11, 11, 4, 51, 764476)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'order': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
216software/Profiles
|
communityprofiles/profiles/oldmigrations/0069_auto__del_field_datadomainindex_groups__add_field_datadomainindex_grou.py
|
Python
|
mit
| 23,239
|
[
"MOE"
] |
74428156524dcd3e5073701e2044fd6bccf8ec3e78a6b41bd69e64e4aa2e64ed
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This sample simulates electrophoresis using the P3M electrostatics solver.
"""
import espressomd
required_features = ["P3M", "EXTERNAL_FORCES", "WCA"]
espressomd.assert_features(required_features)
from espressomd import thermostat
from espressomd import interactions
from espressomd import electrostatics
import sys
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
print(espressomd.features())
# System parameters
#############################################################
system = espressomd.System(box_l=[100.0, 100.0, 100.0])
# Seed
#############################################################
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
system.periodicity = [1, 1, 1]
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# system.cell_system.set_n_square(use_verlet_lists=False)
system.cell_system.max_num_cells = 2744
# Non-bonded interactions
###############################################################
# WCA between monomers
system.non_bonded_inter[0, 0].wca.set_params(
epsilon=1, sigma=1)
# WCA counterions - polymer
system.non_bonded_inter[0, 1].wca.set_params(
epsilon=1, sigma=1)
# WCA ions - polymer
system.non_bonded_inter[0, 2].wca.set_params(
epsilon=1, sigma=1)
# WCA between ions
system.non_bonded_inter[1, 2].wca.set_params(
epsilon=1, sigma=1)
# Bonded interactions
################################################################
# fene = interactions.FeneBond(k=10, d_r_max=2)
# system.bonded_inter.add(fene)
harmonic = interactions.HarmonicBond(k=10, r_0=2)
harmonicangle = interactions.AngleHarmonic(bend=10, phi0=np.pi)
system.bonded_inter.add(harmonic)
system.bonded_inter.add(harmonicangle)
# Create Monomer beads and bonds
#########################################################################################
n_monomers = 20
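# Place the monomers on a straight line along x, centred in the simulation box.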
init_polymer_pos = np.dstack(
(np.arange(n_monomers), np.zeros(n_monomers), np.zeros(n_monomers)))[0] + \
np.array([system.box_l[0] / 2 - n_monomers / 2,
system.box_l[1] / 2,
system.box_l[2] / 2])
system.part.add(pos=init_polymer_pos)
# system.part[:-1].add_bond((harmonic, np.arange(n_monomers)[1:]))
# system.part[1:-1].add_bond((harmonicangle, np.arange(n_monomers)[:-2], np.arange(n_monomers)[2:]))
# Bond creation with loops:
for i in range(n_monomers):
if i > 0:
system.part[i].add_bond((harmonic, i - 1))
for i in range(1, n_monomers-1):
system.part[i].add_bond((harmonicangle, i - 1, i + 1))
system.part[:n_monomers].q = -np.ones(n_monomers)
# Create counterions
###################################################################
system.part.add(pos=np.random.random((n_monomers, 3)) * system.box_l,
q=np.ones(n_monomers, dtype=int),
type=np.ones(n_monomers, dtype=int))
# Create ions
###############################################################
n_ions = 100
system.part.add(pos=np.random.random((n_ions, 3)) * system.box_l,
q=np.hstack((np.ones(n_ions // 2), -np.ones(n_ions // 2))),
type=np.array(np.hstack((np.ones(n_ions // 2), 2 * np.ones(n_ions // 2))), dtype=int))
# Assign charges to particles after the particle creation:
# system.part[2*n_monomers:2*n_monomers+n_ions/2] = np.ones(n_ions/2)
# system.part[2*n_monomers+n_ions/2:] = -np.ones(n_ions/2)
print("types:", system.part[:].type)
print("")
print("Q_tot:", np.sum(system.part[:].q))
#############################################################
# Warmup #
#############################################################
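# Gradually ramp up the force cap during warmup to relax initial particle
# overlaps; a force cap of 0 (set after the loop) disables capping entirely.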
system.force_cap = 10
for i in range(1000):
if i % 100 == 0:
sys.stdout.write("\rWarmup: %03i" % i)
sys.stdout.flush()
system.integrator.run(steps=1)
system.force_cap = 10*i
system.force_cap = 0
print("\nWarmup finished!\n")
#############################################################
# Sampling #
#############################################################
#
# Activate electrostatics with checkpoint example
#############################################################
read_checkpoint = False
# Load checkpointed p3m class
if os.path.isfile("p3m_checkpoint.pkl") and read_checkpoint is True:
print("reading p3m from file")
with open("p3m_checkpoint.pkl", "rb") as fp:
p3m = pickle.load(fp)
else:
p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)
print("Tuning P3M")
system.actors.add(p3m)
# Checkpoint AFTER tuning (tuning happens when the method is added to actors)
with open("p3m_checkpoint.pkl", "wb") as fp:
pickle.dump(p3m, fp, -1)
print("P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
print(system.actors)
# Apply external force
#############################################################
n_part = len(system.part)
system.part[:].ext_force = np.dstack(
(system.part[:].q * np.ones(n_part), np.zeros(n_part), np.zeros(n_part)))[0]
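# The external force models a uniform electric field E = (1, 0, 0) in reduced
# units, i.e. F_i = q_i * E; the mobility below is therefore evaluated as
# mu = <v_COM> / E with E = 1.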
# print(system.part[:].ext_force)
# Activate LB
############################################################
# lbf = lb.LBF(dens=1, tau=0.01, visc=1, fric=1, agrid=1)
# system.actors.add(lbf)
# Data arrays
v_list = []
pos_list = []
# Sampling Loop
for i in range(4000):
if i % 100 == 0:
sys.stdout.write("\rSampling: %04i" % i)
sys.stdout.flush()
system.integrator.run(steps=1)
v_list.append(system.part[:n_monomers].v)
pos_list.append(system.part[:n_monomers].pos)
print("\nSampling finished!\n")
# Data evaluation
############################################################
# Convert data to numpy arrays
# shape = [time_step, monomer, coordinate]!
v_list = np.array(v_list)
pos_list = np.array(pos_list)
# Calculate COM and COM velocity
COM = pos_list.sum(axis=1) / n_monomers
COM_v = (COM[1:] - COM[:-1]) / system.time_step
# Calculate the Mobility mu = v/E
##################################
mu = COM_v.mean() / 1.0
print("MOBILITY", mu)
# Calculate the Persistence length
# fits better for longer sampling
##################################
# this calculation method requires
# numpy 1.10 or higher
if tuple(map(int, np.__version__.split("."))) >= (1, 10):
from scipy.optimize import curve_fit
from numpy.linalg import norm
# First get bond vectors
bond_vec = pos_list[:, 1:,:] - pos_list[:, :-1,:]
bond_abs = norm(bond_vec, axis=2, keepdims=True)
bond_abs_avg = bond_abs.mean(axis=0)[:, 0]
c_length = bond_abs_avg
for i in range(1, len(bond_abs_avg)):
c_length[i] += c_length[i - 1]
bv_norm = bond_vec / bond_abs
bv_zero = np.empty_like(bv_norm)
for i in range(bv_zero.shape[1]):
bv_zero[:, i,:] = bv_norm[:, 0,:]
# Calculate <cos(theta)>
cos_theta = (bv_zero * bv_norm).sum(axis=2).mean(axis=0)
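    # Fit the exponential decay <cos(theta(s))> = exp(-s / l_p) of the bond
    # orientation correlations along the contour length s to extract the
    # persistence length l_p.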
def decay(x, lp):
return np.exp(-x / lp)
fit, _ = curve_fit(decay, c_length, cos_theta)
print(c_length.shape, cos_theta.shape)
print("PERSISTENCE LENGTH", fit[0])
# Plot Results
############################################################
import matplotlib.pyplot as pp
direction = ["x", "y", "z"]
fig1 = pp.figure()
ax = fig1.add_subplot(111)
for i in range(3):
ax.plot(COM[:-500, i], label="COM pos %s" % direction[i])
ax.legend(loc="best")
ax.set_xlabel("time step")
ax.set_ylabel("r")
fig2 = pp.figure()
ax = fig2.add_subplot(111)
for i in range(3):
ax.plot(COM_v[:-500, i], label="COM v %s" % direction[i])
ax.legend(loc="best")
ax.set_xlabel("time step")
ax.set_ylabel("v")
if tuple(map(int, np.__version__.split("."))) >= (1, 10):
fig3 = pp.figure()
ax = fig3.add_subplot(111)
ax.plot(c_length, cos_theta, label="sim data")
ax.plot(c_length, decay(c_length, fit[0]), label="fit")
ax.legend(loc="best")
ax.set_xlabel("contour length")
ax.set_ylabel(r"$\langle \cos(\theta) \rangle$")
pp.show()
print("\nJob finished!\n")
|
mkuron/espresso
|
samples/electrophoresis.py
|
Python
|
gpl-3.0
| 8,819
|
[
"ESPResSo"
] |
d2926a33cc5551ced3eadcda31a51491cbdf016999fa4a1e6409a2b590cca783
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import os
import vtk
import nibabel as nib
import invesalius.constants as const
from invesalius import inv_paths
def ReadOthers(dir_):
"""
    Read the given Analyze, NIfTI, Compressed NIfTI or PAR/REC file,
    remove singleton image dimensions and convert the image orientation to
    the RAS+ canonical coordinate system. The Analyze header does not store
    an affine transformation matrix, so such images cannot be converted
    automatically to the canonical orientation.
:param dir_: file path
:return: imagedata object
"""
if not const.VTK_WARNING:
log_path = os.path.join(inv_paths.USER_LOG_DIR, 'vtkoutput.txt')
fow = vtk.vtkFileOutputWindow()
fow.SetFileName(log_path.encode(const.FS_ENCODE))
ow = vtk.vtkOutputWindow()
ow.SetInstance(fow)
try:
imagedata = nib.squeeze_image(nib.load(dir_))
imagedata = nib.as_closest_canonical(imagedata)
imagedata.update_header()
except(nib.filebasedimages.ImageFileError):
return False
return imagedata
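# Minimal usage sketch (illustrative only; the file path below is hypothetical):
#
# imagedata = ReadOthers('/path/to/volume.nii.gz')
# if imagedata is not False:
#     print(imagedata.shape)
#     print(imagedata.header.get_zooms())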
|
rmatsuda/invesalius3
|
invesalius/reader/others_reader.py
|
Python
|
gpl-2.0
| 2,046
|
[
"VTK"
] |
9a5c5171a34050b97d038198b7b0e7962bb9b885c843efe8aa3875c8a3df557b
|
"""
This file provides the categories and sizes for different ships in the EVE
Online MMORPG by CCP games. Data from EVE University.
"""
CATEGORIES = {
'Abaddon': 'Battleship',
'Absolution': 'Command Ship',
'Aeon': 'Carrier',
'Algos': 'Destroyer',
'Anathema': 'Covert Ops',
'Anshar': 'Jump Freighter',
'Apocalypse': 'Battleship',
'Apocalypse Imperial Issue': 'Battleship',
'Apocalypse Navy Issue': 'Battleship',
'Apotheosis': 'Shuttle',
'Arazu': 'Recon Ship',
'Arbitrator': 'Cruiser',
'Archon': 'Carrier',
'Ares': 'Interceptor',
'Ark': 'Jump Freighter',
'Armageddon': 'Battleship',
'Ashimmu': 'Cruiser',
'Astarte': 'Command Ship',
'Astero': 'Frigate',
'Atron': 'Frigate',
'Augoror': 'Cruiser',
'Augoror Navy Issue': 'Cruiser',
'Avatar': 'Titan',
'Badger': 'Industrial Ship',
'Bantam': 'Frigate',
'Basilisk': 'Logistics Cruiser',
'Bellicose': 'Cruiser',
'Bestower': 'Industrial Ship',
'Bhaalgorn': 'Battleship',
'Blackbird': 'Cruiser',
'Breacher': 'Frigate',
'Broadsword': 'Heavy Interdictor',
'Brutix': 'Battlecruiser',
'Burst': 'Frigate',
'Bustard': 'Transport Ship',
'Buzzard': 'Covert Ops',
'Caldari Navy Hookbill': 'Frigate',
'Capsule': 'Capsule',
'Caracal': 'Cruiser',
'Catalyst': 'Destroyer',
'Celestis': 'Cruiser',
'Cerberus': 'Heavy Assault Cruiser',
'Chameleon': 'Recon Ship',
'Cheetah': 'Covert Ops',
'Chimera': 'Carrier',
'Claw': 'Interceptor',
'Claymore': 'Command Ship',
'Coercer': 'Destroyer',
'Condor': 'Frigate',
'Confessor': 'Tactical Destroyer',
'Corax': 'Destroyer',
'Cormorant': 'Destroyer',
'Covetor': 'Mining Barge',
'Crane': 'Transport Ship',
'Crow': 'Interceptor',
'Crucifier': 'Frigate',
'Cruor': 'Frigate',
'Crusader': 'Interceptor',
'Curse': 'Recon Ship',
'Cyclone': 'Battlecruiser',
'Cynabal': 'Cruiser',
'Damnation': 'Command Ship',
'Daredevil': 'Frigate',
'Deimos': 'Heavy Assault Cruiser',
'Devoter': 'Heavy Interdictor',
'Dominix': 'Battleship',
'Dragoon': 'Destroyer',
'Drake': 'Battlecruiser',
'Dramiel': 'Frigate',
'Eagle': 'Heavy Assault Cruiser',
'Enyo': 'Assault Frigate',
'Eos': 'Command Ship',
'Erebus': 'Titan',
'Eris': 'Interdictor',
'Executioner': 'Frigate',
'Exequror': 'Cruiser',
'Exequror Navy Issue': 'Cruiser',
'Falcon': 'Recon Ship',
'Federation Navy Comet': 'Frigate',
'Ferox': 'Battlecruiser',
'Flycatcher': 'Interdictor',
'Garmur': 'Frigate',
'Gila': 'Cruiser',
'Gnosis': 'Battlecruiser',
'Gold Magnate': 'Frigate',
'Golem': 'Marauder',
'Goru\'s Shuttle': 'Shuttle',
'Griffin': 'Frigate',
'Guardian': 'Logistics Cruiser',
'Guardian-Vexor': 'Cruiser',
'Guristas Shuttle': 'Shuttle',
'Harbinger': 'Battlecruiser',
'Harpy': 'Assault Frigate',
'Hawk': 'Assault Frigate',
'Hecate': 'Tactical Destroyer',
'Hel': 'Carrier',
'Helios': 'Covert Ops',
'Heretic': 'Interdictor',
'Heron': 'Frigate',
'Hoarder': 'Industrial Ship',
'Hound': 'Stealth Bomber',
'Huginn': 'Recon Ship',
'Hulk': 'Exhumer Barge',
'Hurricane': 'Battlecruiser',
'Hyena': 'Electronic Attack Frigate',
'Hyperion': 'Battleship',
'Ibis': 'Rookie Ship',
'Imicus': 'Frigate',
'Impairor': 'Rookie Ship',
'Impel': 'Transport Ship',
'Imperial Navy Slicer': 'Frigate',
'Incursus': 'Frigate',
'Inquisitor': 'Frigate',
'Ishkur': 'Assault Frigate',
'Ishtar': 'Heavy Assault Cruiser',
'Iteron Mark V': 'Industrial Ship',
'Jackdaw': 'Tactical Destroyer',
'Jaguar': 'Assault Frigate',
'Keres': 'Electronic Attack Frigate',
'Kestrel': 'Frigate',
'Kitsune': 'Electronic Attack Frigate',
'Kronos': 'Marauder',
'Kryos': 'Industrial Ship',
'Lachesis': 'Recon Ship',
'Legion': 'Strategic Cruiser',
'Leviathan': 'Titan',
'Loki': 'Strategic Cruiser',
'Machariel': 'Battleship',
'Mackinaw': 'Exhumer Barge',
'Maelstrom': 'Battleship',
'Magnate': 'Frigate',
'Malediction': 'Interceptor',
'Maller': 'Cruiser',
'Mammoth': 'Industrial Ship',
'Manticore': 'Stealth Bomber',
'Mastodon': 'Transport Ship',
'Maulus': 'Frigate',
'Megathron': 'Battleship',
'Megathron Federate Issue': 'Battleship',
'Megathron Navy Issue': 'Battleship',
'Merlin': 'Frigate',
'Moa': 'Cruiser',
'Moros': 'Dreadnought',
'Muninn': 'Heavy Assault Cruiser',
'Myrmidon': 'Battlecruiser',
'Naga': 'Attack Battlecruiser',
'Naglfar': 'Dreadnought',
'Navitas': 'Frigate',
'Nemesis': 'Stealth Bomber',
'Nereus': 'Industrial Ship',
'Nidhoggur': 'Carrier',
'Nighthawk': 'Command Ship',
'Nightmare': 'Battleship',
'Noctis': 'Industrial Ship',
'Nomad': 'Jump Freighter',
'Nyx': 'Carrier',
'Occator': 'Transport Ship',
'Omen': 'Cruiser',
'Omen Navy Issue': 'Cruiser',
'Oneiros': 'Logistics Cruiser',
'Onyx': 'Heavy Interdictor',
'Oracle': 'Attack Battlecruiser',
'Orca': 'Capital Industrial Ship',
'Osprey': 'Cruiser',
'Osprey Navy Issue': 'Cruiser',
'Paladin': 'Marauder',
'Panther': 'Black Op',
'Phantasm': 'Cruiser',
'Phobos': 'Heavy Interdictor',
'Phoenix': 'Dreadnought',
'Pilgrim': 'Recon Ship',
'Probe': 'Frigate',
'Procurer': 'Mining Barge',
'Prophecy': 'Battlecruiser',
'Prorator': 'Transport Ship',
'Prospect': 'Exploration Frigate',
'Proteus': 'Strategic Cruiser',
'Prowler': 'Transport Ship',
'Punisher': 'Frigate',
'Purifier': 'Stealth Bomber',
'Ragnarok': 'Titan',
'Rapier': 'Recon Ship',
'Raptor': 'Interceptor',
'Rattlesnake': 'Battleship',
'Raven': 'Battleship',
'Raven Navy Issue': 'Battleship',
'Raven State Issue': 'Battleship',
'Reaper': 'Rookie Ship',
'Redeemer': 'Black Op',
'Republic Fleet Firetail': 'Frigate',
'Retribution': 'Assault Frigate',
'Retriever': 'Mining Barge',
'Revelation': 'Dreadnought',
'Rhea': 'Jump Freighter',
'Rifter': 'Frigate',
'Rokh': 'Battleship',
'Rook': 'Recon Ship',
'Rorqual': 'Capital Industrial Ship',
'Rupture': 'Cruiser',
'Sabre': 'Interdictor',
'Sacrilege': 'Heavy Assault Cruiser',
'Scimitar': 'Logistics Cruiser',
'Scorpion': 'Battleship',
'Scythe': 'Cruiser',
'Scythe Fleet Issue': 'Cruiser',
'Sentinel': 'Electronic Attack Frigate',
'Sigil': 'Industrial Ship',
'Silver Magnate': 'Frigate',
'Sin': 'Black Op',
'Skiff': 'Exhumer Barge',
'Slasher': 'Frigate',
'Sleipnir': 'Command Ship',
'Stabber': 'Cruiser',
'Stabber Fleet Issue': 'Cruiser',
'Stiletto': 'Interceptor',
'Stratios': 'Cruiser',
'Succubus': 'Frigate',
'Svipul': 'Tactical Destroyer',
'Talos': 'Attack Battlecruiser',
'Talwar': 'Destroyer',
'Taranis': 'Interceptor',
'Tempest': 'Battleship',
'Tempest Fleet Issue': 'Battleship',
'Tempest Tribal Issue': 'Battleship',
'Tengu': 'Strategic Cruiser',
'Thanatos': 'Carrier',
'Thorax': 'Cruiser',
'Thrasher': 'Destroyer',
'Tormentor': 'Frigate',
'Tornado': 'Attack Battlecruiser',
'Tristan': 'Frigate',
'Typhoon': 'Battleship',
'Vagabond': 'Heavy Assault Cruiser',
'Vargur': 'Marauder',
'Velator': 'Rookie Ship',
'Vengeance': 'Assault Frigate',
'Vexor': 'Cruiser',
'Vexor Navy Issue': 'Cruiser',
'Viator': 'Transport Ship',
'Vigil': 'Frigate',
'Vigilant': 'Cruiser',
'Vindicator': 'Battleship',
'Vulture': 'Command Ship',
'Widow': 'Black Op',
'Wolf': 'Assault Frigate',
'Worm': 'Frigate',
'Wreathe': 'Industrial Ship',
'Wyvern': 'Carrier',
'Zealot': 'Heavy Assault Cruiser'
}
SIZES = {
'Assault Frigate': 'Frigate',
'Attack Battlecruiser': 'Battlecruiser',
'Battlecruiser': 'Battlecruiser',
'Battleship': 'Battleship',
'Black Op': 'Battleship',
'Capital Industrial Ship': 'Capital',
'Capsule': 'Capsule',
'Carrier': 'Capital',
'Command Ship': 'Battlecruiser',
'Covert Ops': 'Frigate',
'Cruiser': 'Cruiser',
'Destroyer': 'Destroyer',
'Dreadnought': 'Capital',
'Electronic Attack Frigate': 'Frigate',
'Exhumer Barge': 'Industrial Ship',
'Exploration Frigate': 'Frigate',
'Frigate': 'Frigate',
'Heavy Assault Cruiser': 'Cruiser',
'Heavy Interdictor': 'Cruiser',
'Industrial Ship': 'Industrial Ship',
'Interceptor': 'Frigate',
'Interdictor': 'Destroyer',
'Jump Freighter': 'Industrial Ship',
'Logistics Cruiser': 'Cruiser',
'Marauder': 'Battleship',
'Mining Barge': 'Industrial Ship',
'Recon Ship': 'Cruiser',
'Rookie Ship': 'Rookie Ship',
'Shuttle': 'Shuttle',
'Stealth Bomber': 'Frigate',
'Strategic Cruiser': 'Cruiser',
'Tactical Destroyer': 'Destroyer',
'Titan': 'Capital',
'Transport Ship': 'Industrial Ship'
}
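# Minimal usage sketch (illustrative only): resolve a ship name to its hull
# category and size class.
if __name__ == '__main__':
    ship = 'Sabre'
    category = CATEGORIES[ship]   # 'Interdictor'
    size = SIZES[category]        # 'Destroyer'
    print('{}: category={}, size={}'.format(ship, category, size))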
|
StephenSwat/EVE-Fleetboss
|
fleetboss/ships.py
|
Python
|
gpl-3.0
| 9,057
|
[
"Jaguar",
"ORCA"
] |
46e4c9e2eaa88e98a87adaa30502c81c32bd3442f0904d278ea4132e4a2298e0
|
"""Mass-balance models"""
# Built ins
import logging
# External libs
import cftime
import numpy as np
import pandas as pd
import netCDF4
from scipy.interpolate import interp1d
from scipy import optimize as optimization
# Locals
import oggm.cfg as cfg
from oggm.cfg import SEC_IN_YEAR, SEC_IN_MONTH
from oggm.utils import (SuperclassMeta, lazy_property, floatyear_to_date,
date_to_floatyear, monthly_timeseries, ncDataset,
tolist, clip_min, clip_max, clip_array)
from oggm.exceptions import InvalidWorkflowError, InvalidParamsError
from oggm import entity_task
# Module logger
log = logging.getLogger(__name__)
class MassBalanceModel(object, metaclass=SuperclassMeta):
"""Interface and common logic for all mass balance models used in OGGM.
All mass-balance models should implement this interface.
Attributes
----------
valid_bounds : [float, float]
The altitudinal bounds where the MassBalanceModel is valid. This is
necessary for automated ELA search.
"""
def __init__(self):
""" Initialize."""
self.valid_bounds = None
self.hemisphere = None
self.rho = cfg.PARAMS['ice_density']
def __repr__(self):
"""String Representation of the mass-balance model"""
summary = ['<oggm.MassBalanceModel>']
summary += [' Class: ' + self.__class__.__name__]
summary += [' Attributes:']
# Add all scalar attributes
for k, v in self.__dict__.items():
if np.isscalar(v) and not k.startswith('_'):
nbform = ' - {}: {}'
if k == 'mu_star':
nbform = ' - {}: {:.2f}'
summary += [nbform.format(k, v)]
return '\n'.join(summary) + '\n'
# TODO: remove this in OGGM v1.5
@property
def prcp_bias(self):
raise AttributeError('prcp_bias has been renamed to prcp_fac as it is '
'a multiplicative factor, please use prcp_fac '
'instead.')
@prcp_bias.setter
def prcp_bias(self, new_prcp_fac):
raise AttributeError('prcp_bias has been renamed to prcp_fac as it is '
'a multiplicative factor. If you want to '
'change the precipitation scaling factor use '
'prcp_fac instead.')
def get_monthly_mb(self, heights, year=None, fl_id=None, fls=None):
"""Monthly mass-balance at given altitude(s) for a moment in time.
Units: [m s-1], or meters of ice per second
Note: `year` is optional because some simpler models have no time
component.
Parameters
----------
heights: ndarray
            the altitudes at which the mass-balance will be computed
year: float, optional
the time (in the "hydrological floating year" convention)
fl_id: float, optional
the index of the flowline in the fls array (might be ignored
by some MB models)
fls: list of flowline instances, optional
the flowlines array, in case the MB model implementation needs
to know details about the glacier geometry at the moment the
MB model is called
Returns
-------
the mass-balance (same dim as `heights`) (units: [m s-1])
"""
raise NotImplementedError()
def get_annual_mb(self, heights, year=None, fl_id=None, fls=None):
"""Like `self.get_monthly_mb()`, but for annual MB.
        For some simpler mass-balance models `get_monthly_mb()` and
        `get_annual_mb()` can be equivalent.
Units: [m s-1], or meters of ice per second
Note: `year` is optional because some simpler models have no time
component.
Parameters
----------
heights: ndarray
the altitudes at which the mass-balance will be computed
year: float, optional
the time (in the "floating year" convention)
fl_id: float, optional
the index of the flowline in the fls array (might be ignored
by some MB models)
fls: list of flowline instances, optional
the flowlines array, in case the MB model implementation needs
to know details about the glacier geometry at the moment the
MB model is called
Returns
-------
the mass-balance (same dim as `heights`) (units: [m s-1])
"""
raise NotImplementedError()
def get_specific_mb(self, heights=None, widths=None, fls=None,
year=None):
"""Specific mb for this year and a specific glacier geometry.
Units: [mm w.e. yr-1], or millimeter water equivalent per year
Parameters
----------
heights: ndarray
the altitudes at which the mass-balance will be computed.
Overridden by ``fls`` if provided
widths: ndarray
the widths of the flowline (necessary for the weighted average).
Overridden by ``fls`` if provided
fls: list of flowline instances, optional
Another way to get heights and widths - overrides them if
provided.
year: float, optional
the time (in the "hydrological floating year" convention)
Returns
-------
the specific mass-balance (units: mm w.e. yr-1)
"""
if len(np.atleast_1d(year)) > 1:
out = [self.get_specific_mb(heights=heights, widths=widths,
fls=fls, year=yr)
for yr in year]
return np.asarray(out)
if fls is not None:
mbs = []
widths = []
for i, fl in enumerate(fls):
_widths = fl.widths
try:
# For rect and parabola don't compute spec mb
_widths = np.where(fl.thick > 0, _widths, 0)
except AttributeError:
pass
widths = np.append(widths, _widths)
mbs = np.append(mbs, self.get_annual_mb(fl.surface_h,
fls=fls, fl_id=i,
year=year))
else:
mbs = self.get_annual_mb(heights, year=year)
return np.average(mbs, weights=widths) * SEC_IN_YEAR * self.rho
def get_ela(self, year=None, **kwargs):
"""Compute the equilibrium line altitude for this year
Parameters
----------
year: float, optional
the time (in the "hydrological floating year" convention)
**kwargs: any other keyword argument accepted by self.get_annual_mb
Returns
-------
the equilibrium line altitude (ELA, units: m)
"""
if len(np.atleast_1d(year)) > 1:
return np.asarray([self.get_ela(year=yr, **kwargs) for yr in year])
if self.valid_bounds is None:
raise ValueError('attribute `valid_bounds` needs to be '
'set for the ELA computation.')
# Check for invalid ELAs
b0, b1 = self.valid_bounds
if (np.any(~np.isfinite(
self.get_annual_mb([b0, b1], year=year, **kwargs))) or
(self.get_annual_mb([b0], year=year, **kwargs)[0] > 0) or
(self.get_annual_mb([b1], year=year, **kwargs)[0] < 0)):
return np.NaN
def to_minimize(x):
return (self.get_annual_mb([x], year=year, **kwargs)[0] *
SEC_IN_YEAR * self.rho)
return optimization.brentq(to_minimize, *self.valid_bounds, xtol=0.1)
class ScalarMassBalance(MassBalanceModel):
"""Constant mass-balance, everywhere."""
def __init__(self, mb=0.):
""" Initialize.
Parameters
----------
mb: float
Fix the mass balance to a certain value (unit: [mm w.e. yr-1])
"""
super(ScalarMassBalance, self).__init__()
self.hemisphere = 'nh'
self.valid_bounds = [-2e4, 2e4] # in m
self._mb = mb
def get_monthly_mb(self, heights, **kwargs):
mb = np.asarray(heights) * 0 + self._mb
return mb / SEC_IN_YEAR / self.rho
def get_annual_mb(self, heights, **kwargs):
mb = np.asarray(heights) * 0 + self._mb
return mb / SEC_IN_YEAR / self.rho
class LinearMassBalance(MassBalanceModel):
"""Constant mass-balance as a linear function of altitude.
"""
def __init__(self, ela_h, grad=3., max_mb=None):
""" Initialize.
Parameters
----------
ela_h: float
Equilibrium line altitude (units: [m])
grad: float
Mass-balance gradient (unit: [mm w.e. yr-1 m-1])
max_mb: float
Cap the mass balance to a certain value (unit: [mm w.e. yr-1])
Attributes
----------
        temp_bias : float, default 0
            A "temperature bias" doesn't make much sense in the linear MB
context, but we implemented a simple empirical rule:
+ 1K -> ELA + 150 m
"""
super(LinearMassBalance, self).__init__()
self.hemisphere = 'nh'
self.valid_bounds = [-1e4, 2e4] # in m
self.orig_ela_h = ela_h
self.ela_h = ela_h
self.grad = grad
self.max_mb = max_mb
self._temp_bias = 0
@property
def temp_bias(self):
"""Temperature bias to add to the original series."""
return self._temp_bias
@temp_bias.setter
def temp_bias(self, value):
"""Temperature bias to change the ELA."""
self.ela_h = self.orig_ela_h + value * 150
self._temp_bias = value
def get_monthly_mb(self, heights, **kwargs):
mb = (np.asarray(heights) - self.ela_h) * self.grad
if self.max_mb is not None:
clip_max(mb, self.max_mb, out=mb)
return mb / SEC_IN_YEAR / self.rho
def get_annual_mb(self, heights, **kwargs):
return self.get_monthly_mb(heights, **kwargs)
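# Minimal usage sketch for the analytical models above (illustrative only;
# assumes oggm.cfg.initialize() has been called so that PARAMS are set):
#
#     mbmod = LinearMassBalance(ela_h=3000., grad=3.)
#     heights = np.array([2500., 3000., 3500.])
#     # convert back from m of ice per second to mm w.e. yr-1
#     annual_mb = mbmod.get_annual_mb(heights) * SEC_IN_YEAR * mbmod.rho
#     ela = mbmod.get_ela()  # ~3000 m by construction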
class PastMassBalance(MassBalanceModel):
"""Mass balance during the climate data period."""
def __init__(self, gdir, mu_star=None, bias=None,
filename='climate_historical', input_filesuffix='',
repeat=False, ys=None, ye=None, check_calib_params=True):
"""Initialize.
Parameters
----------
gdir : GlacierDirectory
the glacier directory
mu_star : float, optional
set to the alternative value of mu* you want to use
(the default is to use the calibrated value).
bias : float, optional
set to the alternative value of the calibration bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
            Note that this bias is *subtracted* from the computed MB. Indeed:
BIAS = MODEL_MB - REFERENCE_MB.
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data.
input_filesuffix : str
the file suffix of the input climate file
repeat : bool
Whether the climate period given by [ys, ye] should be repeated
indefinitely in a circular way
ys : int
The start of the climate period where the MB model is valid
(default: the period with available data)
ye : int
The end of the climate period where the MB model is valid
(default: the period with available data)
check_calib_params : bool
OGGM will try hard not to use wrongly calibrated mu* by checking
the parameters used during calibration and the ones you are
using at run time. If they don't match, it will raise an error.
Set to False to suppress this check.
Attributes
----------
temp_bias : float, default 0
Add a temperature bias to the time series
prcp_fac : float, default cfg.PARAMS['prcp_scaling_factor']
            Precipitation factor applied to the time series (called a factor to make clear
that it is a multiplicative factor in contrast to the additive
`temp_bias`)
"""
super(PastMassBalance, self).__init__()
self.valid_bounds = [-1e4, 2e4] # in m
if mu_star is None:
df = gdir.read_json('local_mustar')
mu_star = df['mu_star_glacierwide']
if check_calib_params:
if not df['mu_star_allsame']:
msg = ('You seem to use the glacier-wide mu* to compute '
'the mass-balance although this glacier has '
'different mu* for its flowlines. Set '
'`check_calib_params=False` to prevent this '
'error.')
raise InvalidWorkflowError(msg)
if bias is None:
if cfg.PARAMS['use_bias_for_run']:
df = gdir.read_json('local_mustar')
bias = df['bias']
else:
bias = 0.
self.mu_star = mu_star
self.bias = bias
# Parameters
self.t_solid = cfg.PARAMS['temp_all_solid']
self.t_liq = cfg.PARAMS['temp_all_liq']
self.t_melt = cfg.PARAMS['temp_melt']
prcp_fac = cfg.PARAMS['prcp_scaling_factor']
# check if valid prcp_fac is used
if prcp_fac <= 0:
raise InvalidParamsError('prcp_fac has to be above zero!')
default_grad = cfg.PARAMS['temp_default_gradient']
# Check the climate related params to the GlacierDir to make sure
if check_calib_params:
mb_calib = gdir.get_climate_info()['mb_calib_params']
for k, v in mb_calib.items():
if v != cfg.PARAMS[k]:
msg = ('You seem to use different mass-balance parameters '
'than used for the calibration: '
f"you use cfg.PARAMS['{k}']={cfg.PARAMS[k]} while "
f"it was calibrated with cfg.PARAMS['{k}']={v}. "
'Set `check_calib_params=False` to ignore this '
'warning.')
raise InvalidWorkflowError(msg)
# Public attrs
self.hemisphere = gdir.hemisphere
self.repeat = repeat
# Private attrs
        # keep the prcp_fac given at instantiation so that it can be
        # changed later on (the setter rescales the prcp time series)
self._prcp_fac = prcp_fac
# same for temp bias
self._temp_bias = 0.
# Read file
fpath = gdir.get_filepath(filename, filesuffix=input_filesuffix)
with ncDataset(fpath, mode='r') as nc:
# time
time = nc.variables['time']
try:
time = netCDF4.num2date(time[:], time.units)
except ValueError:
# This is for longer time series
time = cftime.num2date(time[:], time.units, calendar='noleap')
ny, r = divmod(len(time), 12)
if r != 0:
raise ValueError('Climate data should be N full years')
# This is where we switch to hydro float year format
# Last year gives the tone of the hydro year
self.years = np.repeat(np.arange(time[-1].year - ny + 1,
time[-1].year + 1), 12)
pok = slice(None) # take all is default (optim)
if ys is not None:
pok = self.years >= ys
if ye is not None:
try:
pok = pok & (self.years <= ye)
except TypeError:
pok = self.years <= ye
self.years = self.years[pok]
self.months = np.tile(np.arange(1, 13), ny)[pok]
# Read timeseries and correct it
self.temp = nc.variables['temp'][pok].astype(np.float64) + self._temp_bias
self.prcp = nc.variables['prcp'][pok].astype(np.float64) * self._prcp_fac
if 'gradient' in nc.variables:
grad = nc.variables['gradient'][pok].astype(np.float64)
# Security for stuff that can happen with local gradients
g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
grad = np.where(~np.isfinite(grad), default_grad, grad)
grad = clip_array(grad, g_minmax[0], g_minmax[1])
else:
grad = self.prcp * 0 + default_grad
self.grad = grad
self.ref_hgt = nc.ref_hgt
self.ys = self.years[0]
self.ye = self.years[-1]
    # adds the possibility of changing prcp_fac after instantiation
    # while properly rescaling the prcp time series
@property
def prcp_fac(self):
return self._prcp_fac
@prcp_fac.setter
def prcp_fac(self, new_prcp_fac):
# just to check that no invalid prcp_factors are used
if np.any(np.asarray(new_prcp_fac) <= 0):
raise InvalidParamsError('prcp_fac has to be above zero!')
if len(np.atleast_1d(new_prcp_fac)) == 12:
# OK so that's monthly stuff
# We dirtily assume that user just used calendar month
sm = cfg.PARAMS['hydro_month_' + self.hemisphere]
new_prcp_fac = np.roll(new_prcp_fac, 13 - sm)
new_prcp_fac = np.tile(new_prcp_fac, len(self.prcp) // 12)
self.prcp *= new_prcp_fac / self._prcp_fac
# update old prcp_fac in order that it can be updated again ...
self._prcp_fac = new_prcp_fac
# same for temp_bias:
@property
def temp_bias(self):
return self._temp_bias
@temp_bias.setter
def temp_bias(self, new_temp_bias):
if len(np.atleast_1d(new_temp_bias)) == 12:
# OK so that's monthly stuff
# We dirtily assume that user just used calendar month
sm = cfg.PARAMS['hydro_month_' + self.hemisphere]
new_temp_bias = np.roll(new_temp_bias, 13 - sm)
new_temp_bias = np.tile(new_temp_bias, len(self.temp) // 12)
self.temp += new_temp_bias - self._temp_bias
# update old temp_bias in order that it can be updated again ...
self._temp_bias = new_temp_bias
def get_monthly_climate(self, heights, year=None):
"""Monthly climate information at given heights.
Note that prcp is corrected with the precipitation factor and that
all other model biases (temp and prcp) are applied.
Returns
-------
(temp, tempformelt, prcp, prcpsol)
"""
y, m = floatyear_to_date(year)
if self.repeat:
y = self.ys + (y - self.ys) % (self.ye - self.ys + 1)
if y < self.ys or y > self.ye:
raise ValueError('year {} out of the valid time bounds: '
'[{}, {}]'.format(y, self.ys, self.ye))
pok = np.where((self.years == y) & (self.months == m))[0][0]
# Read already (temperature bias and precipitation factor corrected!)
itemp = self.temp[pok]
iprcp = self.prcp[pok]
igrad = self.grad[pok]
# For each height pixel:
# Compute temp and tempformelt (temperature above melting threshold)
npix = len(heights)
temp = np.ones(npix) * itemp + igrad * (heights - self.ref_hgt)
tempformelt = temp - self.t_melt
clip_min(tempformelt, 0, out=tempformelt)
# Compute solid precipitation from total precipitation
prcp = np.ones(npix) * iprcp
fac = 1 - (temp - self.t_solid) / (self.t_liq - self.t_solid)
prcpsol = prcp * clip_array(fac, 0, 1)
return temp, tempformelt, prcp, prcpsol
def _get_2d_annual_climate(self, heights, year):
# Avoid code duplication with a getter routine
year = np.floor(year)
if self.repeat:
year = self.ys + (year - self.ys) % (self.ye - self.ys + 1)
if year < self.ys or year > self.ye:
raise ValueError('year {} out of the valid time bounds: '
'[{}, {}]'.format(year, self.ys, self.ye))
pok = np.where(self.years == year)[0]
if len(pok) < 1:
raise ValueError('Year {} not in record'.format(int(year)))
# Read already (temperature bias and precipitation factor corrected!)
itemp = self.temp[pok]
iprcp = self.prcp[pok]
igrad = self.grad[pok]
# For each height pixel:
# Compute temp and tempformelt (temperature above melting threshold)
heights = np.asarray(heights)
npix = len(heights)
grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
grad_temp *= (heights.repeat(12).reshape(grad_temp.shape) -
self.ref_hgt)
temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
temp2dformelt = temp2d - self.t_melt
clip_min(temp2dformelt, 0, out=temp2dformelt)
# Compute solid precipitation from total precipitation
prcp = np.atleast_2d(iprcp).repeat(npix, 0)
fac = 1 - (temp2d - self.t_solid) / (self.t_liq - self.t_solid)
prcpsol = prcp * clip_array(fac, 0, 1)
return temp2d, temp2dformelt, prcp, prcpsol
def get_annual_climate(self, heights, year=None):
"""Annual climate information at given heights.
Note that prcp is corrected with the precipitation factor and that
all other model biases (temp and prcp) are applied.
Returns
-------
(temp, tempformelt, prcp, prcpsol)
"""
t, tmelt, prcp, prcpsol = self._get_2d_annual_climate(heights, year)
return (t.mean(axis=1), tmelt.sum(axis=1),
prcp.sum(axis=1), prcpsol.sum(axis=1))
def get_monthly_mb(self, heights, year=None, add_climate=False, **kwargs):
t, tmelt, prcp, prcpsol = self.get_monthly_climate(heights, year=year)
mb_month = prcpsol - self.mu_star * tmelt
mb_month -= self.bias * SEC_IN_MONTH / SEC_IN_YEAR
if add_climate:
return (mb_month / SEC_IN_MONTH / self.rho, t, tmelt,
prcp, prcpsol)
return mb_month / SEC_IN_MONTH / self.rho
def get_annual_mb(self, heights, year=None, add_climate=False, **kwargs):
t, tmelt, prcp, prcpsol = self._get_2d_annual_climate(heights, year)
mb_annual = np.sum(prcpsol - self.mu_star * tmelt, axis=1)
mb_annual = (mb_annual - self.bias) / SEC_IN_YEAR / self.rho
if add_climate:
return (mb_annual, t.mean(axis=1), tmelt.sum(axis=1),
prcp.sum(axis=1), prcpsol.sum(axis=1))
return mb_annual
class ConstantMassBalance(MassBalanceModel):
"""Constant mass-balance during a chosen period.
This is useful for equilibrium experiments.
"""
def __init__(self, gdir, mu_star=None, bias=None,
y0=None, halfsize=15, filename='climate_historical',
input_filesuffix='', **kwargs):
"""Initialize
Parameters
----------
gdir : GlacierDirectory
the glacier directory
mu_star : float, optional
set to the alternative value of mu* you want to use
(the default is to use the calibrated value)
bias : float, optional
set to the alternative value of the annual bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
y0 : int, optional, default: tstar
the year at the center of the period of interest. The default
is to use tstar as center.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data.
input_filesuffix : str
the file suffix of the input climate file
"""
super(ConstantMassBalance, self).__init__()
self.mbmod = PastMassBalance(gdir, mu_star=mu_star, bias=bias,
filename=filename,
input_filesuffix=input_filesuffix,
**kwargs)
if y0 is None:
df = gdir.read_json('local_mustar')
y0 = df['t_star']
# This is a quick'n dirty optimisation
try:
fls = gdir.read_pickle('model_flowlines')
h = []
for fl in fls:
# We use bed because of overdeepenings
h = np.append(h, fl.bed_h)
h = np.append(h, fl.surface_h)
zminmax = np.round([np.min(h)-50, np.max(h)+2000])
except FileNotFoundError:
# in case we don't have them
with ncDataset(gdir.get_filepath('gridded_data')) as nc:
if np.isfinite(nc.min_h_dem):
# a bug sometimes led to non-finite
zminmax = [nc.min_h_dem-250, nc.max_h_dem+1500]
else:
zminmax = [nc.min_h_glacier-1250, nc.max_h_glacier+1500]
self.hbins = np.arange(*zminmax, step=10)
self.valid_bounds = self.hbins[[0, -1]]
self.y0 = y0
self.halfsize = halfsize
self.years = np.arange(y0-halfsize, y0+halfsize+1)
self.hemisphere = gdir.hemisphere
@property
def temp_bias(self):
"""Temperature bias to add to the original series."""
return self.mbmod.temp_bias
@temp_bias.setter
def temp_bias(self, value):
"""Temperature bias to add to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.temp_bias = value
@property
def prcp_fac(self):
"""Precipitation factor to apply to the original series."""
return self.mbmod.prcp_fac
@prcp_fac.setter
def prcp_fac(self, value):
"""Precipitation factor to apply to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.prcp_fac = value
@property
def bias(self):
"""Residual bias to apply to the original series."""
return self.mbmod.bias
@bias.setter
def bias(self, value):
"""Residual bias to apply to the original series."""
self.mbmod.bias = value
@lazy_property
def interp_yr(self):
# annual MB
mb_on_h = self.hbins*0.
for yr in self.years:
mb_on_h += self.mbmod.get_annual_mb(self.hbins, year=yr)
return interp1d(self.hbins, mb_on_h / len(self.years))
@lazy_property
def interp_m(self):
# monthly MB
months = np.arange(12)+1
interp_m = []
for m in months:
mb_on_h = self.hbins*0.
for yr in self.years:
yr = date_to_floatyear(yr, m)
mb_on_h += self.mbmod.get_monthly_mb(self.hbins, year=yr)
interp_m.append(interp1d(self.hbins, mb_on_h / len(self.years)))
return interp_m
def get_monthly_climate(self, heights, year=None):
"""Average climate information at given heights.
Note that prcp is corrected with the precipitation factor and that
all other biases (precipitation, temp) are applied
Returns
-------
(temp, tempformelt, prcp, prcpsol)
"""
_, m = floatyear_to_date(year)
yrs = [date_to_floatyear(y, m) for y in self.years]
heights = np.atleast_1d(heights)
nh = len(heights)
shape = (len(yrs), nh)
temp = np.zeros(shape)
tempformelt = np.zeros(shape)
prcp = np.zeros(shape)
prcpsol = np.zeros(shape)
for i, yr in enumerate(yrs):
t, tm, p, ps = self.mbmod.get_monthly_climate(heights, year=yr)
temp[i, :] = t
tempformelt[i, :] = tm
prcp[i, :] = p
prcpsol[i, :] = ps
return (np.mean(temp, axis=0),
np.mean(tempformelt, axis=0),
np.mean(prcp, axis=0),
np.mean(prcpsol, axis=0))
def get_annual_climate(self, heights, year=None):
"""Average climate information at given heights.
Note that prcp is corrected with the precipitation factor and that
all other biases (precipitation, temp) are applied
Returns
-------
(temp, tempformelt, prcp, prcpsol)
"""
yrs = monthly_timeseries(self.years[0], self.years[-1],
include_last_year=True)
heights = np.atleast_1d(heights)
nh = len(heights)
shape = (len(yrs), nh)
temp = np.zeros(shape)
tempformelt = np.zeros(shape)
prcp = np.zeros(shape)
prcpsol = np.zeros(shape)
for i, yr in enumerate(yrs):
t, tm, p, ps = self.mbmod.get_monthly_climate(heights, year=yr)
temp[i, :] = t
tempformelt[i, :] = tm
prcp[i, :] = p
prcpsol[i, :] = ps
# Note that we do not weight for number of days per month:
# this is consistent with OGGM's calendar
return (np.mean(temp, axis=0),
np.mean(tempformelt, axis=0) * 12,
np.mean(prcp, axis=0) * 12,
np.mean(prcpsol, axis=0) * 12)
def get_monthly_mb(self, heights, year=None, add_climate=False, **kwargs):
yr, m = floatyear_to_date(year)
if add_climate:
t, tmelt, prcp, prcpsol = self.get_monthly_climate(heights, year=year)
return self.interp_m[m-1](heights), t, tmelt, prcp, prcpsol
return self.interp_m[m-1](heights)
def get_annual_mb(self, heights, year=None, add_climate=False, **kwargs):
mb = self.interp_yr(heights)
if add_climate:
t, tmelt, prcp, prcpsol = self.get_annual_climate(heights)
return mb, t, tmelt, prcp, prcpsol
return mb
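# Minimal usage sketch (illustrative only; `gdir` stands for a hypothetical,
# fully preprocessed GlacierDirectory with calibration and climate files):
#
#     mbmod = ConstantMassBalance(gdir, y0=2000, halfsize=10)
#     mbmod.temp_bias = 0.5  # warm the averaging period by 0.5 K
#     mb = mbmod.get_annual_mb(np.linspace(2000., 3500., 100))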
class AvgClimateMassBalance(ConstantMassBalance):
"""Mass balance with the average climate of a selected period.
!!!Careful! This is conceptually wrong!!! This is here only to make
a point.
See https://oggm.org/2021/08/05/mean-forcing/
"""
def __init__(self, gdir, mu_star=None, bias=None,
filename='climate_historical', input_filesuffix='',
y0=None, halfsize=15, **kwargs):
"""Initialize.
Parameters
----------
gdir : GlacierDirectory
the glacier directory
mu_star : float, optional
set to the alternative value of mu* you want to use
(the default is to use the calibrated value).
bias : float, optional
set to the alternative value of the calibration bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
            Note that this bias is *subtracted* from the computed MB. Indeed:
BIAS = MODEL_MB - REFERENCE_MB.
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data.
input_filesuffix : str
the file suffix of the input climate file
y0 : int, optional, default: tstar
the year at the center of the period of interest. The default
is to use tstar as center.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
Attributes
----------
temp_bias : float, default 0
Add a temperature bias to the time series
prcp_fac : float, default cfg.PARAMS['prcp_scaling_factor']
            Precipitation factor applied to the time series (called a factor to make clear
that it is a multiplicative factor in contrast to the additive
`temp_bias`)
"""
super(AvgClimateMassBalance, self).__init__(gdir, mu_star=mu_star,
bias=bias,
filename=filename,
input_filesuffix=input_filesuffix,
y0=y0, halfsize=halfsize)
if y0 is None:
df = gdir.read_json('local_mustar')
y0 = df['t_star']
self.mbmod = PastMassBalance(gdir, mu_star=mu_star, bias=bias,
filename=filename,
input_filesuffix=input_filesuffix,
ys=y0-halfsize, ye=y0+halfsize,
**kwargs)
tmp = self.mbmod.temp
assert (len(tmp) // 12) == (halfsize * 2 + 1)
self.mbmod.temp = tmp.reshape((len(tmp) // 12, 12)).mean(axis=0)
tmp = self.mbmod.prcp
self.mbmod.prcp = tmp.reshape((len(tmp) // 12, 12)).mean(axis=0)
tmp = self.mbmod.grad
self.mbmod.grad = tmp.reshape((len(tmp) // 12, 12)).mean(axis=0)
self.mbmod.ys = y0
self.mbmod.ye = y0
self.mbmod.months = np.arange(1, 13, dtype=int)
self.mbmod.years = np.asarray([y0]*12)
self.years = np.asarray([y0]*12)
class RandomMassBalance(MassBalanceModel):
"""Random shuffle of all MB years within a given time period.
This is useful for finding a possible past glacier state or for sensitivity
experiments.
Note that this is going to be sensitive to extreme years in certain
periods, but it is by far more physically reasonable than other
    approaches based on Gaussian assumptions.
"""
def __init__(self, gdir, mu_star=None, bias=None,
y0=None, halfsize=15, seed=None,
filename='climate_historical', input_filesuffix='',
all_years=False, unique_samples=False, prescribe_years=None,
**kwargs):
"""Initialize.
Parameters
----------
gdir : GlacierDirectory
the glacier directory
mu_star : float, optional
set to the alternative value of mu* you want to use
(the default is to use the calibrated value)
bias : float, optional
set to the alternative value of the calibration bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
            Note that this bias is *subtracted* from the computed MB. Indeed:
BIAS = MODEL_MB - REFERENCE_MB.
y0 : int, optional, default: tstar
the year at the center of the period of interest. The default
is to use tstar as center.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
seed : int, optional
Random seed used to initialize the pseudo-random number generator.
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data.
input_filesuffix : str
the file suffix of the input climate file
all_years : bool
if True, overwrites ``y0`` and ``halfsize`` to use all available
years.
unique_samples: bool
if true, chosen random mass-balance years will only be available
once per random climate period-length
if false, every model year will be chosen from the random climate
period with the same probability
prescribe_years : pandas Series
instead of random samples, take a series of (i, y) pairs where
(i) is the simulation year index and (y) is the year to pick in the
original timeseries. Overrides `y0`, `halfsize`, `all_years`,
`unique_samples` and `seed`.
**kwargs:
            keyword arguments to pass to the PastMassBalance model
"""
super(RandomMassBalance, self).__init__()
self.valid_bounds = [-1e4, 2e4] # in m
self.mbmod = PastMassBalance(gdir, mu_star=mu_star, bias=bias,
filename=filename,
input_filesuffix=input_filesuffix,
**kwargs)
# Climate period
self.prescribe_years = prescribe_years
if self.prescribe_years is None:
# Normal stuff
self.rng = np.random.RandomState(seed)
if all_years:
self.years = self.mbmod.years
else:
if y0 is None:
df = gdir.read_json('local_mustar')
y0 = df['t_star']
self.years = np.arange(y0 - halfsize, y0 + halfsize + 1)
else:
self.rng = None
self.years = self.prescribe_years.index
self.yr_range = (self.years[0], self.years[-1] + 1)
self.ny = len(self.years)
self.hemisphere = gdir.hemisphere
self._state_yr = dict()
# Sampling without replacement
self.unique_samples = unique_samples
if self.unique_samples:
self.sampling_years = self.years
@property
def temp_bias(self):
"""Temperature bias to add to the original series."""
return self.mbmod.temp_bias
@temp_bias.setter
def temp_bias(self, value):
"""Temperature bias to add to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.temp_bias = value
@property
def prcp_fac(self):
"""Precipitation factor to apply to the original series."""
return self.mbmod.prcp_fac
@prcp_fac.setter
def prcp_fac(self, value):
"""Precipitation factor to apply to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.prcp_fac = value
@property
def bias(self):
"""Residual bias to apply to the original series."""
return self.mbmod.bias
@bias.setter
def bias(self, value):
"""Residual bias to apply to the original series."""
self.mbmod.bias = value
def get_state_yr(self, year=None):
"""For a given year, get the random year associated to it."""
year = int(year)
if year not in self._state_yr:
if self.prescribe_years is not None:
self._state_yr[year] = self.prescribe_years.loc[year]
else:
if self.unique_samples:
# --- Sampling without replacement ---
if self.sampling_years.size == 0:
# refill sample pool when all years were picked once
self.sampling_years = self.years
# choose one year which was not used in the current period
_sample = self.rng.choice(self.sampling_years)
# write chosen year to dictionary
self._state_yr[year] = _sample
# update sample pool: remove the chosen year from it
self.sampling_years = np.delete(
self.sampling_years,
np.where(self.sampling_years == _sample))
else:
# --- Sampling with replacement ---
self._state_yr[year] = self.rng.randint(*self.yr_range)
return self._state_yr[year]
def get_monthly_mb(self, heights, year=None, **kwargs):
ryr, m = floatyear_to_date(year)
ryr = date_to_floatyear(self.get_state_yr(ryr), m)
return self.mbmod.get_monthly_mb(heights, year=ryr, **kwargs)
def get_annual_mb(self, heights, year=None, **kwargs):
ryr = self.get_state_yr(int(year))
return self.mbmod.get_annual_mb(heights, year=ryr, **kwargs)
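# Illustrative usage sketch (an assumption-laden example, not part of the OGGM
# API itself; it assumes a fully preprocessed glacier directory): shows how
# RandomMassBalance maps each simulation year to a fixed, randomly drawn
# climate year, so repeated calls with the same ``year`` are reproducible for
# a given seed.
def _random_mb_usage_sketch(gdir, heights):
    mb = RandomMassBalance(gdir, halfsize=15, seed=0)
    mb_yr1 = mb.get_annual_mb(heights, year=1)
    mb_yr1_again = mb.get_annual_mb(heights, year=1)  # same drawn climate year
    return mb_yr1, mb_yr1_again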
class UncertainMassBalance(MassBalanceModel):
"""Adding uncertainty to a mass balance model.
There are three variables for which you can add uncertainty:
- temperature (additive bias)
- precipitation (multiplicative factor)
- residual (a bias in units of MB)
"""
def __init__(self, basis_model,
rdn_temp_bias_seed=None, rdn_temp_bias_sigma=0.1,
rdn_prcp_bias_seed=None, rdn_prcp_bias_sigma=0.1,
rdn_bias_seed=None, rdn_bias_sigma=100):
"""Initialize.
Parameters
----------
basis_model : MassBalanceModel
the model to which you want to add the uncertainty to
rdn_temp_bias_seed : int
the seed of the random number generator
rdn_temp_bias_sigma : float
the standard deviation of the random temperature error
rdn_prcp_bias_seed : int
the seed of the random number generator
(to be consistent this should be renamed prcp_fac as well)
rdn_prcp_bias_sigma : float
the standard deviation of the random precipitation error
(to be consistent this should be renamed prcp_fac as well)
rdn_bias_seed : int
the seed of the random number generator
rdn_bias_sigma : float
the standard deviation of the random MB error
"""
super(UncertainMassBalance, self).__init__()
# the aim here is to change temp_bias and prcp_fac so
self.mbmod = basis_model
self.hemisphere = basis_model.hemisphere
self.valid_bounds = self.mbmod.valid_bounds
self.rng_temp = np.random.RandomState(rdn_temp_bias_seed)
self.rng_prcp = np.random.RandomState(rdn_prcp_bias_seed)
self.rng_bias = np.random.RandomState(rdn_bias_seed)
self._temp_sigma = rdn_temp_bias_sigma
self._prcp_sigma = rdn_prcp_bias_sigma
self._bias_sigma = rdn_bias_sigma
self._state_temp = dict()
self._state_prcp = dict()
self._state_bias = dict()
@property
def temp_bias(self):
"""Temperature bias to add to the original series."""
return self.mbmod.temp_bias
@temp_bias.setter
def temp_bias(self, value):
"""Temperature bias to add to the original series."""
for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
if hasattr(self, attr_name):
delattr(self, attr_name)
self.mbmod.temp_bias = value
@property
def prcp_fac(self):
"""Precipitation factor to apply to the original series."""
return self.mbmod.prcp_fac
@prcp_fac.setter
def prcp_fac(self, value):
"""Precipitation factor to apply to the original series."""
self.mbmod.prcp_fac = value
def _get_state_temp(self, year):
year = int(year)
if year not in self._state_temp:
self._state_temp[year] = self.rng_temp.randn() * self._temp_sigma
return self._state_temp[year]
def _get_state_prcp(self, year):
year = int(year)
if year not in self._state_prcp:
self._state_prcp[year] = self.rng_prcp.randn() * self._prcp_sigma
return self._state_prcp[year]
def _get_state_bias(self, year):
year = int(year)
if year not in self._state_bias:
self._state_bias[year] = self.rng_bias.randn() * self._bias_sigma
return self._state_bias[year]
def get_monthly_mb(self, heights, year=None, **kwargs):
raise NotImplementedError()
def get_annual_mb(self, heights, year=None, fl_id=None, **kwargs):
# Keep the original biases and add a random error
_t = self.mbmod.temp_bias
_p = self.mbmod.prcp_fac
_b = self.mbmod.bias
self.mbmod.temp_bias = self._get_state_temp(year) + _t
self.mbmod.prcp_fac = self._get_state_prcp(year) + _p
self.mbmod.bias = self._get_state_bias(year) + _b
try:
out = self.mbmod.get_annual_mb(heights, year=year, fl_id=fl_id)
except BaseException:
self.mbmod.temp_bias = _t
self.mbmod.prcp_fac = _p
self.mbmod.bias = _b
raise
# Back to normal
self.mbmod.temp_bias = _t
self.mbmod.prcp_fac = _p
self.mbmod.bias = _b
return out
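# Illustrative usage sketch (an assumption-laden example, not part of the OGGM
# API itself; it assumes a preprocessed glacier directory and a year within
# the climate record): UncertainMassBalance wraps a basis model and perturbs
# its temperature bias, precipitation factor and residual bias with a yearly
# random error before each annual MB call, restoring the originals afterwards.
def _uncertain_mb_usage_sketch(gdir, heights):
    basis = PastMassBalance(gdir)
    mb = UncertainMassBalance(basis, rdn_temp_bias_seed=0,
                              rdn_prcp_bias_seed=1, rdn_bias_seed=2)
    return mb.get_annual_mb(heights, year=2000)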
class MultipleFlowlineMassBalance(MassBalanceModel):
"""Handle mass-balance at the glacier level instead of flowline level.
Convenience class doing not much more than wrapping a list of mass-balance
models, one for each flowline.
This is useful for real-case studies, where each flowline might have a
different mu*.
Attributes
----------
fls : list
list of flowline objects
mb_models : list
list of mass-balance objects
"""
def __init__(self, gdir, fls=None, mu_star=None,
mb_model_class=PastMassBalance, use_inversion_flowlines=False,
input_filesuffix='', bias=None, **kwargs):
"""Initialize.
Parameters
----------
gdir : GlacierDirectory
the glacier directory
mu_star : float or list of floats, optional
set to the alternative value of mu* you want to use
(the default is to use the calibrated value). Give a list of values
for flowline-specific mu*
fls : list, optional
list of flowline objects to use (defaults to 'model_flowlines',
and if not available, to 'inversion_flowlines')
mb_model_class : class, optional
the mass-balance model to use (e.g. PastMassBalance,
ConstantMassBalance...)
use_inversion_flowlines: bool, optional
if True 'inversion_flowlines' instead of 'model_flowlines' will be
used.
input_filesuffix : str
the file suffix of the input climate file
bias : float, optional
set to the alternative value of the calibration bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
            Note that this bias is *subtracted* from the computed MB. Indeed:
BIAS = MODEL_MB - REFERENCE_MB.
kwargs : kwargs to pass to mb_model_class
"""
# Read in the flowlines
if use_inversion_flowlines:
fls = gdir.read_pickle('inversion_flowlines')
if fls is None:
try:
fls = gdir.read_pickle('model_flowlines')
except FileNotFoundError:
raise InvalidWorkflowError('Need a valid `model_flowlines` '
'file. If you explicitly want to '
'use `inversion_flowlines`, set '
'use_inversion_flowlines=True.')
self.fls = fls
_y0 = kwargs.get('y0', None)
# User mu*?
if mu_star is not None:
mu_star = tolist(mu_star, length=len(fls))
for fl, mu in zip(self.fls, mu_star):
fl.mu_star = mu
# Initialise the mb models
self.flowline_mb_models = []
for fl in self.fls:
# Merged glaciers will need different climate files, use filesuffix
if (fl.rgi_id is not None) and (fl.rgi_id != gdir.rgi_id):
rgi_filesuffix = '_' + fl.rgi_id + input_filesuffix
else:
rgi_filesuffix = input_filesuffix
# merged glaciers also have a different MB bias from calibration
if ((bias is None) and cfg.PARAMS['use_bias_for_run'] and
(fl.rgi_id != gdir.rgi_id)):
df = gdir.read_json('local_mustar', filesuffix='_' + fl.rgi_id)
fl_bias = df['bias']
else:
fl_bias = bias
# Constant and RandomMassBalance need y0 if not provided
if (issubclass(mb_model_class, RandomMassBalance) or
issubclass(mb_model_class, ConstantMassBalance)) and (
fl.rgi_id != gdir.rgi_id) and (_y0 is None):
df = gdir.read_json('local_mustar', filesuffix='_' + fl.rgi_id)
kwargs['y0'] = df['t_star']
self.flowline_mb_models.append(
mb_model_class(gdir, mu_star=fl.mu_star, bias=fl_bias,
input_filesuffix=rgi_filesuffix, **kwargs))
self.valid_bounds = self.flowline_mb_models[-1].valid_bounds
self.hemisphere = gdir.hemisphere
@property
def temp_bias(self):
"""Temperature bias to add to the original series."""
return self.flowline_mb_models[0].temp_bias
@temp_bias.setter
def temp_bias(self, value):
"""Temperature bias to add to the original series."""
for mbmod in self.flowline_mb_models:
mbmod.temp_bias = value
@property
def prcp_fac(self):
"""Precipitation factor to apply to the original series."""
return self.flowline_mb_models[0].prcp_fac
@prcp_fac.setter
def prcp_fac(self, value):
"""Precipitation factor to apply to the original series."""
for mbmod in self.flowline_mb_models:
mbmod.prcp_fac = value
@property
def bias(self):
"""Residual bias to apply to the original series."""
return self.flowline_mb_models[0].bias
@bias.setter
def bias(self, value):
"""Residual bias to apply to the original series."""
for mbmod in self.flowline_mb_models:
mbmod.bias = value
def get_monthly_mb(self, heights, year=None, fl_id=None, **kwargs):
if fl_id is None:
raise ValueError('`fl_id` is required for '
'MultipleFlowlineMassBalance!')
return self.flowline_mb_models[fl_id].get_monthly_mb(heights,
year=year,
**kwargs)
def get_annual_mb(self, heights, year=None, fl_id=None, **kwargs):
if fl_id is None:
raise ValueError('`fl_id` is required for '
'MultipleFlowlineMassBalance!')
return self.flowline_mb_models[fl_id].get_annual_mb(heights,
year=year,
**kwargs)
def get_annual_mb_on_flowlines(self, fls=None, year=None):
"""Get the MB on all points of the glacier at once.
Parameters
----------
fls: list, optional
the list of flowlines to get the mass-balance from. Defaults
to self.fls
year: float, optional
the time (in the "floating year" convention)
Returns
-------
Tuple of (heights, widths, mass_balance) 1D arrays
"""
if fls is None:
fls = self.fls
heights = []
widths = []
mbs = []
for i, fl in enumerate(fls):
h = fl.surface_h
heights = np.append(heights, h)
widths = np.append(widths, fl.widths)
mbs = np.append(mbs, self.get_annual_mb(h, year=year, fl_id=i))
return heights, widths, mbs
def get_specific_mb(self, heights=None, widths=None, fls=None,
year=None):
if heights is not None or widths is not None:
raise ValueError('`heights` and `widths` kwargs do not work with '
'MultipleFlowlineMassBalance!')
if fls is None:
fls = self.fls
if len(np.atleast_1d(year)) > 1:
out = [self.get_specific_mb(fls=fls, year=yr) for yr in year]
return np.asarray(out)
mbs = []
widths = []
for i, (fl, mb_mod) in enumerate(zip(fls, self.flowline_mb_models)):
_widths = fl.widths
try:
# For rect and parabola don't compute spec mb
_widths = np.where(fl.thick > 0, _widths, 0)
except AttributeError:
pass
widths = np.append(widths, _widths)
mb = mb_mod.get_annual_mb(fl.surface_h, year=year, fls=fls, fl_id=i)
mbs = np.append(mbs, mb * SEC_IN_YEAR * mb_mod.rho)
return np.average(mbs, weights=widths)
def get_ela(self, year=None, **kwargs):
# ELA here is not without ambiguity.
# We compute a mean weighted by area.
if len(np.atleast_1d(year)) > 1:
return np.asarray([self.get_ela(year=yr) for yr in year])
elas = []
areas = []
for fl_id, (fl, mb_mod) in enumerate(zip(self.fls,
self.flowline_mb_models)):
elas = np.append(elas, mb_mod.get_ela(year=year, fl_id=fl_id,
fls=self.fls))
areas = np.append(areas, np.sum(fl.widths))
return np.average(elas, weights=areas)
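# Illustrative usage sketch (an assumption-laden example, not part of the OGGM
# API itself; it assumes a preprocessed glacier directory and a year within
# the climate record): one mass-balance model is built per flowline, ``fl_id``
# selects which one to query, and get_specific_mb() returns the width-weighted
# average over all flowlines.
def _multi_fl_mb_usage_sketch(gdir):
    mb = MultipleFlowlineMassBalance(gdir, use_inversion_flowlines=True)
    annual_fl0 = mb.get_annual_mb(mb.fls[0].surface_h, year=2000, fl_id=0)
    spec = mb.get_specific_mb(year=2000)
    return annual_fl0, spec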
@entity_task(log)
def fixed_geometry_mass_balance(gdir, ys=None, ye=None, years=None,
monthly_step=False,
use_inversion_flowlines=True,
climate_filename='climate_historical',
climate_input_filesuffix='',
temperature_bias=None,
precipitation_factor=None):
"""Computes the mass-balance with climate input from e.g. CRU or a GCM.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
ys : int
        start year of the model run (default: from the climate file)
ye : int
end year of the model run (default: from the climate file)
years : array of ints
override ys and ye with the years of your choice
monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
use_inversion_flowlines : bool
whether to use the inversion flowlines or the model flowlines
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix: str
filesuffix for the input climate file
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
multiply a factor to the precipitation time series
default is None and means that the precipitation factor from the
calibration is applied which is cfg.PARAMS['prcp_scaling_factor']
"""
if monthly_step:
raise NotImplementedError('monthly_step not implemented yet')
mbmod = MultipleFlowlineMassBalance(gdir, mb_model_class=PastMassBalance,
filename=climate_filename,
use_inversion_flowlines=use_inversion_flowlines,
input_filesuffix=climate_input_filesuffix)
if temperature_bias is not None:
mbmod.temp_bias = temperature_bias
if precipitation_factor is not None:
mbmod.prcp_fac = precipitation_factor
if years is None:
if ys is None:
ys = mbmod.flowline_mb_models[0].ys
if ye is None:
ye = mbmod.flowline_mb_models[0].ye
years = np.arange(ys, ye + 1)
odf = pd.Series(data=mbmod.get_specific_mb(year=years),
index=years)
return odf
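# Illustrative usage sketch (an assumption-laden example): as an entity task,
# fixed_geometry_mass_balance can be called on a single glacier directory and
# returns a pandas Series of specific mass-balance indexed by year, e.g.
#     odf = fixed_geometry_mass_balance(gdir, ys=1980, ye=2010)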
@entity_task(log)
def compute_ela(gdir, ys=None, ye=None, years=None, climate_filename='climate_historical',
temperature_bias=None, precipitation_factor=None, climate_input_filesuffix=''):
"""Computes the ELA of a glacier for a for given years and climate.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
ys : int
start year
ye : int
end year
years : array of ints
override ys and ye with the years of your choice
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix : str
filesuffix for the input climate file
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
multiply a factor to the precipitation time series
default is None and means that the precipitation factor from the
calibration is applied which is cfg.PARAMS['prcp_scaling_factor']
    Returns
    -------
    a pandas Series with the yearly ELA values
"""
mbmod = PastMassBalance(gdir, filename=climate_filename,
input_filesuffix=climate_input_filesuffix)
if temperature_bias is not None:
mbmod.temp_bias = temperature_bias
if precipitation_factor is not None:
mbmod.prcp_fac = precipitation_factor
mbmod.valid_bounds = [-10000, 20000]
if years is None:
years = np.arange(ys, ye+1)
ela = []
for yr in years:
ela = np.append(ela, mbmod.get_ela(year=yr))
odf = pd.Series(data=ela, index=years)
return odf
|
TimoRoth/oggm
|
oggm/core/massbalance.py
|
Python
|
bsd-3-clause
| 57,645
|
[
"Gaussian"
] |
7412be97c5fce2c28c83f125dc484b571c9a958854a3e00228bb105116c80efa
|
#import stuff
import os
import random
import math
from scipy import stats as scstats
from matplotlib import pyplot as plt
from tabulate import tabulate as tabulate
import matplotlib.cm as cm
import sys
import numpy as np
import scipy.stats as stats
#### data preprocessing ####
class Enri():
def __init__(self):
self.pydir = os.getcwd()
self.paths = open('%s/infiles/paths.txt' % self.pydir).read().splitlines()
self.dirs = self.make_dirs()
self.data, self.headers = self.parse_train_txt()
self.pocket1, self.pocket1_headers = self.get_pocket2(self.data, self.headers)
def make_dirs(self):
''' make output directories '''
pydir = os.getcwd()
curdir = os.listdir(pydir)
dirs = ['figures', 'tables', 'outfiles']
path_dirs = []
for dir in dirs:
path_dir = pydir + '/%s' % dir
path_dirs.append(path_dir)
if dir not in curdir:
os.system('mkdir %s' % path_dir)
return path_dirs
def find_files(self, filename):
''' find files, approximate matching '''
file_paths = []
for path in self.paths:
for root, dirs, files in os.walk(path):
for file in files:
if filename in file:
file_path = os.path.join(root,file)
file_paths.append(file_path)
return file_paths
def find_files_from_path(self, filename, path):
''' find files, approximate matching '''
file_paths = []
for root, dirs, files in os.walk(path):
for file in files:
if filename in file:
file_path = os.path.join(root,file)
file_paths.append(file_path)
return file_paths
def find_files_pydir(self, filename):
''' find files in pydir and its subdir. approximate matching '''
filepaths = []
for root, dirs, files in os.walk(self.pydir):
for file in files:
if filename in file:
filepath = os.path.join(root, file)
filepaths.append(filepath)
print filepaths
return filepaths
def rename_files(self,filename):
''' make pdbs dir, rename pdb files, dump to pdbs dir '''
pdbdir = '/Users/rahmadakbar/uc/enrichments/pdbs'
os.system('mkdir %s' % pdbdir)
files = self.find_files(filename)
print len(files)
for file in files:
parts = file.split('/')
if 'high' in file:
new_name = 'h_' + parts[-1]
else:
new_name = 'l_' + parts[-1]
new_file = pdbdir + '/' + new_name
command = 'cp %s %s' % (file, new_file)
os.system(command)
def pdb2desc(self, filename):
''' use dogsitescorer to harvest pockets and descriptors from pdb files '''
files = self.find_files(filename)
print len(files)
dogout_dir = '/'.join(files[0].split('/')[:-1]) + '/dogout'
os.system('mkdir %s' % dogout_dir)
exception_path = dogout_dir + '/exception.txt'
exception_file = open(exception_path, 'w')
for file in files[:]:
print file
try:
contents = open(file).read().split('TER')
#print contents
ligand = contents[-1].splitlines()[1][17:20]
protname = file.split('/')[-1].split('.')[0]
command = 'dogsite -p %s -o %s/%s -r %s -w3 -d -i' % (file, dogout_dir, protname, ligand)
os.system(command)
except Exception:
exception_file.write(file + '\n')
#pass
exception_file.close()
def pdb2desc_from_path(self, pdbpath):
''' use dogsitescorer to harvest pockets and descriptors from pdb files '''
files = self.find_files_from_path('pdb', pdbpath)
print len(files)
dogout_dir = '/'.join(files[0].split('/')[:-1]) + '/dogout'
os.system('mkdir %s' % dogout_dir)
exception_path = dogout_dir + '/exception.txt'
exception_file = open(exception_path, 'w')
for file in files[:]:
print file
try:
contents = open(file).read().split('TER')
#print contents
ligand = contents[-1].splitlines()[1][17:20]
protname = file.split('/')[-1].split('.')[0]
command = 'dogsite -p %s -o %s/%s -r %s -w3 -d -i' % (file, dogout_dir, protname, ligand)
os.system(command)
except Exception:
exception_file.write(file + '\n')
#pass
exception_file.close()
def name2firstcol(self, filename):
''' add protein name to the 1st column of desc.txt files
output .edt files '''
files = self.find_files(filename)
for file in files[:]:
data = ''
names = self.path2names(file)
protname = names[1][:-4]
contents = open(file).readlines()
for line in contents[:]:
if line.startswith('name'):
data += line
else:
newline = protname + line
data += newline
outpath = names[0] + '/' + names[1] + '.edt'
outfile = open(outpath, 'w')
outfile.write(data)
def name2firstcol_from_path(self, pdbpath):
''' add protein name to the 1st column of desc.txt files
output .edt files '''
files = self.find_files_from_path('desc.txt', pdbpath)
for file in files[:]:
data = ''
names = self.path2names(file)
protname = names[1][:-4]
contents = open(file).readlines()
for line in contents[:]:
if line.startswith('name'):
data += line
else:
newline = protname + line
data += newline
outpath = names[0] + '/' + names[1] + '.edt'
outfile = open(outpath, 'w')
outfile.write(data)
def merge_edt(self, filename='.edt'):
''' merge all .edt files to a single file '''
files = self.find_files(filename)
names = self.path2names(files[0])
path = names[0]
merged_content = open(files[0]).read()
for file in files[1:]:
contents = open(file).readlines()[1:] # skip header
for line in contents:
merged_content += line
outfile = open('%s/desc_merged.txt' % path, 'w')
outfile.write(merged_content)
outfile.close()
print len(open('%s/desc_merged.txt' % path).readlines())
def merge_edt_from_path(self, pdbpath):
''' merge all .edt files to a single file '''
files = self.find_files_from_path('.edt', pdbpath)
names = self.path2names(files[0])
path = names[0]
merged_content = open(files[0]).read()
for file in files[1:]:
contents = open(file).readlines()[1:] # skip header
for line in contents:
merged_content += line
outfile = open('%s/desc_merged.txt' % path, 'w')
outfile.write(merged_content)
outfile.close()
print len(open('%s/desc_merged.txt' % path).readlines())
def parse_edt(self, filename = 'desc_merged.txt'):
''' parse desc_merged.txt, output features and headers'''
file = open(self.find_files(filename)[0]).read().splitlines()
unwanted_index = [3]
data = [line.split('\t') for line in file]
data2 = [[x for i,x in enumerate(item) if i not in unwanted_index] for item in data[:]]
headers = data2[0][:]
data2 = [[item[0]]+[float(x) for x in item[1:]] for item in data2[1:]]
print len(data2)
print headers
print data2[0]
return data2, headers
def parse_desc_merged_txt(self, filepath):
''' parse *desc.txt file given absolute path '''
file = open(filepath).read().splitlines()
unwanted_index = [3,63]
unwanted_headers = ['lig_name', 'UNK']
data = [line.split('\t') for line in file]
data2 = [[x for i,x in enumerate(item) if i not in unwanted_index] for item in data[:]]
headers = data[0][:]
contents = data[1:]
float_contents = []
float_headers = [header for header in headers if header not in unwanted_headers]
for content in contents:
protname = content[0]
descs = content[1:]
float_content = [protname]
for i in range(1, len(headers)):
header = headers[i]
if header not in unwanted_headers:
value = float(content[i])
float_content.append(value)
float_contents.append(float_content)
return float_contents, float_headers
#data2 = [[item[0]]+[float(x) for x in item[1:]] for item in data2[1:]]
#return data2, headers
def parse_train_txt(self):
        ''' parse train.txt file, outputs features and headers '''
#train_file = self.pydir + '/outfiles/hdstat_data.tsv'
#train_file = self.pydir + '/infiles/train2.txt'
train_file = self.pydir + '/infiles/train3.txt'
file = open(train_file).read().splitlines()
unwanted_index = [3] #previously was [3]
data = [line.split('\t') for line in file]
data2 = [[x for i,x in enumerate(item) if i not in unwanted_index] for item in data[:]]
headers = data2[0][:]
data2 = [[item[0]]+[float(x) for x in item[1:]] for item in data2[1:]]
return data2, headers
def parse_train_txt2(self, filepath):
        ''' parse train.txt file from an absolute path, outputs features and headers '''
train_file = filepath
file = open(train_file).read().splitlines()
unwanted_index = [3]
data = [line.split('\t') for line in file]
data2 = [[x for i,x in enumerate(item) if i not in unwanted_index] for item in data[:]]
headers = data2[0][:]
data2 = [[item[0]]+[float(x) for x in item[1:]] for item in data2[1:]]
return data2, headers
def parse_train_txts(self):
''' parse train.txt files. returns a list of features and headers '''
train_files = []
traintxt_path = self.pydir + '/infiles'
for root, dirs, files in os.walk(traintxt_path):
for file in files:
if 'train' in file:
train_file = os.path.join(root, file)
train_files.append(train_file)
train_datas = []
for train_file in train_files:
names = self.path2names(train_file)
name = names[1]
train_data = self.parse_train_txt2(train_file)
train_data.append(name)
train_datas.append(train_data)
return train_datas
def plot_train_datas(self):
''' plots histogram of the features from each train data '''
train_datas = self.parse_train_txts()
for train_data in train_datas:
data, headers, name = train_data
clean_data, clean_headers = self.clean_data(data, headers)
self.plot_hist(clean_data, clean_headers, name)
def get_pocket(self, pocket, filename = 'desc_merged.txt'):
''' returns a specific pocket from the file. for instance, setting pocket =
P_1 returns only pocket P_1 '''
data, headers = self.parse_edt(filename)
pockets = []
for item in data:
name = item[0]
if pocket == name[-3:]:
pockets.append(item)
return pockets, headers
def get_pocket2(self, data, headers, pocket = 'P_1'):
''' returns a specific pocket from the file. for instance, setting pocket =
P_1 returns only pocket P_1 '''
pockets = []
for item in data:
name = item[0]
if pocket == name[-3:]:
pockets.append(item)
return pockets, headers
def split_con_nom(self, filename = 'desc_merged.txt'):
data, headers= self.get_pocket('P_1', filename)
con_index, nom_index = [0], [0]
for i,item in enumerate(data[100][1:]):
rem = item%1
if rem != 0:
con_index.append(i+1)
else:
nom_index.append(i+1)
con_data, nom_data = [], []
for item in data:
name = item[0]
con_vals, nom_vals = [], []
con_vals.append(name)
nom_vals.append(name)
for i,val in enumerate(item[1:]):
index = i+1
if index in con_index:
con_vals.append(val)
else:
nom_vals.append(val)
con_data.append(con_vals)
nom_data.append(nom_vals)
con_headers, nom_headers = [headers[0]], [headers[0]]
for i in range(len(headers[1:])):
index = i+1
if index in con_index:
con_headers.append(headers[index])
else:
nom_headers.append(headers[index])
return [con_data, con_headers, 'con_data'],[nom_data, nom_headers, 'nom_data']
def output_data_files(self, filename):
        ''' output the split continuous and nominal data to two files,
        con_data.txt and nom_data.txt '''
datas = self.split_con_nom(filename)
for data in datas:
content = ''
name = data[2]
outname = self.dirs[2] + '/%s.txt' %name
outfile = open(outname, 'w')
content += ','.join(data[1]) + '\n'
for item in data[0]:
content += ','.join(str(x) for x in item) + '\n'
outfile.write(content)
outfile.close()
def split_l_h(self,list):
''' split high (h) and low (l). the list format should follow that of
parse_edt output'''
l = []
h = []
for item in list:
name = item[0]
if name[0] == 'h':
h.append(item)
else:
l.append(item)
return l, h
def plot_con_hist(self, filename='desc_merged.txt'):
data = self.split_con_nom(filename)
cons = data[0]
con_data = cons[0]
con_headers = cons[1]
l,h = self.split_l_h(con_data)
for i in range(1,len(con_headers)):
l_xs = [x[i] for x in l]
h_xs = [x[i] for x in h]
header = con_headers[i].replace('/', '_')
plot_name = self.dirs[0] + '/%s_hist.pdf' % header
plt.figure()
plt.hist(l_xs, bins = 20, alpha = 0.5, label = 'l', edgecolor = 'none')
plt.hist(h_xs, bins = 20, alpha = 0.5, label = 'h', edgecolor = 'none')
plt.legend()
plt.savefig(plot_name)
print 'figures are saved to %s' % self.dirs[0]
def plot_nom_hist(self, filename='desc_merged.txt'):
data = self.split_con_nom(filename)
cons = data[0]
nom_data = cons[0]
nom_headers = cons[1]
l,h = self.split_l_h(nom_data)
for i in range(1,len(nom_headers)):
l_xs = [x[i] for x in l]
h_xs = [x[i] for x in h]
header = nom_headers[i].replace('/', '_')
plot_name = self.dirs[0] + '/%s_hist.pdf' % header
plt.figure()
plt.hist(l_xs, bins = 20, alpha = 0.5, label = 'l', edgecolor = 'none')
plt.hist(h_xs, bins = 20, alpha = 0.5, label = 'h', edgecolor = 'none')
plt.legend()
plt.savefig(plot_name)
print 'figures are saved to %s' % self.dirs[0]
def plot_hist(self, data, headers, figname):
        ''' outputs a histogram of each feature '''
l,h = self.split_l_h(data)
for i in range(1,len(headers)):
l_xs = [x[i] for x in l]
h_xs = [x[i] for x in h]
header = headers[i].replace('/', '_')
plotpath = self.dirs[0] + '/%s_%s_hist.pdf' % (header,figname)
plt.figure()
plt.hist(l_xs, bins = 20, alpha = 0.5, label = 'l', edgecolor = 'none')
plt.hist(h_xs, bins = 20, alpha = 0.5, label = 'h', edgecolor = 'none')
plt.legend()
plt.savefig(plotpath)
plt.close()
#break
print 'figures are saved to %s' % self.dirs[0]
def plot_hist_nolabel(self, data, headers, outdir):
        ''' outputs a histogram of each feature '''
for i in range(1,len(headers)):
xs = [x[i] for x in data]
mean, sd = round(self.mean(xs),3), round(self.sd(xs),3)
plotlabel = 'mean %s, sd %s' % (mean, sd)
header = headers[i].replace('/', '_')
plotpath = outdir + '/%s_hist.pdf' % header
plt.figure()
plt.hist(xs, bins = 50, alpha = 0.5, edgecolor = 'none', label=plotlabel)
plt.legend(frameon = False)
plt.title(headers[i])
plt.savefig(plotpath)
plt.close()
#break
print 'figures are saved to %s' % outdir
def rows2columns(self, data, headers):
''' transform data from parse_edt rows list to a columns list '''
coldata = [[] for x in headers]
for item in data:
for i, val in enumerate(item):
coldata[i].append(val)
return coldata, headers
def columns2rows(self, coldata, headers):
''' transforms columns to rows '''
rows = len(coldata[0])
rowdata = [[] for x in range(rows)]
for items in coldata:
for i,item in enumerate(items):
rowdata[i].append(item)
return rowdata, headers
def get_nonzero_features(self, data, headers):
''' get non zero features from data, headers list. drop UNK and lig_cov'''
data, headers = self.rows2columns(data, headers)
new_headers = []
new_data = []
unwanted_vars = ['UNK', 'lig_cov']
for i, item in enumerate(data[:]):
var_name = headers[i]
if i == 0:
new_data.append(item)
new_headers.append(headers[i])
elif i > 0 and sum(item) > 0 and var_name not in unwanted_vars:
new_data.append(item)
new_headers.append(headers[i])
rowdata, new_headers = self.columns2rows(new_data, new_headers)
return rowdata, new_headers
def clean_data(self, data, headers):
''' returns clean data, grabs only pocket P_1 and removes zero features '''
pockets, headers = self.get_pocket2(data, headers)
nz_data, nz_headers = self.get_nonzero_features(pockets, headers)
return nz_data, nz_headers
def clean_data_continuous(self, data, headers, sample_index):
''' returns clean data, grabs only continuous data for pocket P_1 and removes zero features '''
pockets, headers = self.get_pocket2(data, headers)
nz_data, nz_headers = self.get_nonzero_features(pockets, headers)
con_data, con_headers = self.continuous_data(nz_data, nz_headers, sample_index)
#print con_headers
return con_data, con_headers
def path2names(self, filepath):
        ''' returns file name with and without extension for a given path '''
parts = filepath.split('/')
path = '/'.join(parts[:-1])
name_ex = parts[-1]
name = name_ex.split('.')[0]
return path, name, name_ex
### adaptive synthetic sampling ###
def euclidian_distance(self, p, q):
        ''' get the Euclidean distance between two points. each point is a
        vector of n dimensions '''
d = math.sqrt(sum([(p_i - q_i)**2 for p_i, q_i in zip(p, q) ]))
return d
def vector_diff(self, p,q):
''' get the difference between two vectors '''
dif = [p_i - q_i for p_i, q_i in zip(p,q)]
return dif
def vector_sum(self, p,q):
        ''' get the sum of two vectors '''
sum = [p_i + q_i for p_i, q_i in zip(p,q)]
return sum
def scalar_mul(self, p, s):
''' returns scalar multiplication of vector p and scalar s '''
mul = [p_i*s for p_i in p]
return mul
def scalar_mul_random(self, p):
        ''' returns the multiplication of vector p by a random scalar s in [0, 1) '''
s = random.random()
mul = [p_i*s for p_i in p]
return mul
def generate_synthetic_data(self, x, p):
        ''' generate a synthetic sample x_new. p is a randomly chosen nearest neighbor of x '''
diff = self.vector_diff(p,x)
random_diff = self.scalar_mul_random(diff)
x_new = self.vector_sum(x, random_diff)
return x_new
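    # Illustration with hypothetical values (not from the original data): for
    # x = [1.0, 2.0] and a chosen neighbor p = [3.0, 4.0],
    # generate_synthetic_data returns x + r*(p - x) for a random r in [0, 1),
    # i.e. a point on the segment between x and p, e.g. [1.8, 2.8] when r = 0.4.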
def get_knn(self, pockets, headers, k = 10):
        ''' returns a dictionary containing the k nearest neighbors (knn) of each pocket '''
knns = {}
for pocket in pockets:
neighbors = []
pocket_name = pocket[0]
for pocket2 in pockets:
if pocket != pocket2:
distance = self.euclidian_distance(pocket[1:], pocket2[1:])
pocket2_name = pocket2[0]
neighbors.append((pocket2_name, distance))
knn = sorted(neighbors, key = lambda neighbor: neighbor[1])[:k]
knns[pocket_name] = knn
return knns, pockets, headers
def get_minority(self, knns):
''' returns the minority class and its neighbors '''
mins = {}
label = 'h_'
for item in knns:
if item[:2] == label:
mins[item] = knns[item]
return mins
def gamma(self, mins):
        ''' returns a dictionary containing the gamma ratio for each data point in
        the minority class mins. delta_i is the number of neighbors belonging to
        the majority class, k the total number of neighbors. z is a normalizing
        factor so that the sum of all gammas is 1 '''
k = len(mins.items()[0][1])
gammas = {}
for item in mins:
delta_i = 0.0
neighbors = mins[item]
for neighbor in neighbors:
name = neighbor[0]
if name[:2] != item[:2]:
delta_i = delta_i + 1
gamma = delta_i/k
gammas[item] = gamma
norm_gammas = {}
z = sum(gammas.values())
for item in gammas:
gamma = gammas[item]
norm_gamma = gamma/z
norm_gammas[item] = norm_gamma
return norm_gammas
def synthetic_data_number(self, knns, beta = 0.65):
        ''' returns the number of synthetic samples needed to balance the classes
        for each minority class member. g is the size difference between the two classes '''
mins = self.get_minority(knns)
norm_gammas = self.gamma(mins)
g = (len(knns)- len(mins)) * beta
gs = {}
for item in mins:
gamma_i = norm_gammas[item]
g_i = gamma_i * g
g_i = int(round(g_i))
gs[item] = g_i
return gs
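    # Illustration with hypothetical counts (not from the original data): with
    # 140 pockets in total of which 40 belong to the minority class 'h',
    # g = (140 - 40) * 0.65 = 65 synthetic samples in total for beta = 0.65;
    # each minority point i then gets g_i = round(norm_gamma_i * g), so points
    # with more majority-class neighbours receive more synthetic samples.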
def adasyn(self, data, headers, k = 10, beta = 0.65):
        ''' generate new samples for each member of the minority class by using
        its gamma distribution (norm_gammas) to decide the number of needed
        synthetic samples (gs). this is known as adaptive synthetic sampling. now
        uses only neighbors with class h to generate the synthetic samples '''
knns, pockets, headers = self.get_knn(data, headers, k)
gs = self.synthetic_data_number(knns, beta)
pocket_dict = {}
for pocket in pockets:
name = pocket[0]
feats = pocket[1:]
pocket_dict[name] = feats
syn_pockets = {}
for item in gs:
g = gs[item]
name_parts = item.split('_')
for i in range(g):
name = [name_parts[0]] + ['syn%s' % str(i)] + name_parts[1:]
name = '_'.join(name)
neighbors = knns[item]
neighbors2 = [neighbor for neighbor in neighbors if neighbor[0][0] == name[0]]
if len(neighbors2) > 1:
random_neighbor = random.choice(neighbors2)[0]
else:
random_neighbor = random.choice(neighbors)[0]
x = pocket_dict[item]
p = pocket_dict[random_neighbor]
new_x = self.generate_synthetic_data(x,p)
syn_pockets[name] = new_x
merged_pockets = pocket_dict.copy()
merged_pockets.update(syn_pockets)
# outname = self.dirs[2] + '/adasyn_k%s_beta%s.txt' % (str(k), str(beta))
# outfile = open(outname, 'w')
# outfile.write(','.join(headers) + '\n')
# for item in merged_pockets:
# content = [item] + [str(x) for x in merged_pockets[item]]
# content = ','.join(content) + '\n'
# outfile.write(content)
# outfile.close()
adasyn_data = [[item] + merged_pockets[item] for item in merged_pockets]
return adasyn_data, headers
def plot_adasyn(self, k, beta):
''' plot adaptive synthetic samples for train data '''
adasyn_data ,adasyn_headers = self.adasyn(self.pocket1, self.pocket1_headers, k, beta)
print self.pocket1
sys.exit()
nz_data, nz_headers = self.get_nonzero_features(adasyn_data, adasyn_headers)
plotname = 'adasyn_k%s_beta%s' % (k, beta)
self.plot_hist(nz_data, nz_headers, plotname)
#### naive bayes #####
# def continuous_index(self, filename):
# ''' returns continius variables indices and thei headers '''
# data, headers = self.get_pocket('p_1', filename)
# data, headers = self.get_nonzero_features2(data, headers)
# con_index = [0]
# con_headers =[headers[0]]
# sample = data[120]
# for i in range(1,len(sample)):
# item = sample[i]
# rem = item%1
# if rem != 0:
# con_index.append(i)
# con_headers.append(headers[i])
# return con_index, con_headers
def find_sample_index(self):
''' find sample index to be used to fetch continuous data '''
train_datas= self.parse_train_txts()
con_counts_collections = []
for train_data in train_datas:
data, headers, name = train_data
data, headers = self.clean_data(data, headers)
con_counts = []
for i, items in enumerate(data):
con_count = 0
for item in items[1:]:
rem = item%1
if rem != 0:
con_count += 1
con_counts.append([con_count, i])
con_counts_collections.append(con_counts)
max_indices = []
for item in con_counts_collections:
con_counts = sorted(item)
max = con_counts[-1][0]
max_index = [x[1] for x in con_counts if x[0] == max]
max_indices.append(max_index)
intersect = set(max_indices[0]).intersection(max_indices[1])
print list(intersect)[0]
return list(intersect)[0]
def continuous_data(self, data, headers, sample_index):
        ''' returns continuous data. use indices from continuous_index '''
con_index = [0]
con_headers =[headers[0]]
sample = data[sample_index]
for i in range(1,len(sample)):
item = sample[i]
rem = item%1
if rem != 0:
con_index.append(i)
con_headers.append(headers[i])
con_data = []
for item in data:
con = [item[i] for i in con_index]
con_data.append(con)
return con_data, con_headers
# def nominal_index(self, filename):
# ''' returns nominal indices and their headers '''
# data, headers = self.get_pocket('p_1', filename)
# data, headers = self.get_nonzero_features2(data, headers)
# nom_index = [0]
# nom_headers =[headers[0]]
# sample = data[120]
# for i in range(1,len(sample)):
# item = sample[i]
# rem = item%1
# if rem == 0:
# nom_index.append(i)
# nom_headers.append(headers[i])
# return nom_index, nom_headers
def nominal_data(self, data, headers, sampel_index):
''' returns nominal data '''
nom_index = [0]
nom_headers =[headers[0]]
sample = data[sampel_index]
for i in range(1,len(sample)):
item = sample[i]
rem = item%1
if rem == 0:
nom_index.append(i)
nom_headers.append(headers[i])
nom_data = []
for item in data:
nom = [item[i] for i in nom_index]
nom_data.append(nom)
return nom_data, nom_headers
def mean(self, list):
''' compute sample mean '''
mean = sum(list)/float(len(list))
return mean
def demean(self, list, mean):
        ''' subtract the mean from the samples '''
demean = [x_i - mean for x_i in list]
return demean
def sum_squared(self, list):
        ''' return the sum of squares of a collection '''
ss = sum(x_i**2 for x_i in list)
return ss
def sd(self, list):
        ''' return the standard deviation of a collection '''
mu = self.mean(list)
n = float(len(list))
demean = self.demean(list, mu)
ss = self.sum_squared(demean)
sd = math.sqrt(ss/n)
return sd
def p_normal_cdf(self, x, mu, sigma):
        ''' return the probability mass in the window [x - sigma, x + sigma],
        computed from the cumulative distribution function. assumes the
        distribution is normal '''
half_sigma = sigma/2.0
x_up = x + sigma
x_down = x - sigma
p_up = (1 + math.erf((x_up-mu)/sigma/math.sqrt(2)))/float(2)
p_down = (1 + math.erf((x_down-mu)/sigma/math.sqrt(2)))/float(2)
p = p_up - p_down
# p = (1 + math.erf((x-mu)/sigma/math.sqrt(2)))/float(2)
return p
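    # Illustration (hypothetical numbers): p_normal_cdf(x, mu, sigma) returns
    # the normal probability mass in the window [x - sigma, x + sigma]; e.g.
    # for x = mu it is about 0.683 (the one-sigma mass). This window mass is
    # used in place of a point density in the naive Bayes products below.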
def get_params(self, list):
''' returns mu and sigma for a collection '''
mu = self.mean(list)
sd = self.sd(list)
if mu == 0:
mu = 1e-14
if sd == 0:
sd = 1e-14
return mu, sd
def get_params_dict(self, data, headers):
        ''' returns a parameters dictionary for the given data '''
data, headers = self.rows2columns(data, headers)
params = {}
params[headers[0]] = data[0]
for i in range(1, len(headers)):
header = headers[i]
values = data[i]
mu, sigma = self.get_params(values)
params[header] = mu, sigma
return params
def get_kde_dict(self, data, headers):
''' get a gaussian kernel density estimator for each feature in the data '''
data, headers = self.rows2columns(data, headers)
kdes = {}
kdes[headers[0]] = data[0]
for i in range(1, len(headers)):
header = headers[i]
values = data[i]
kde = scstats.kde.gaussian_kde(values)
minval = min(values)
kdes[header] = kde,minval
return kdes
def split_train_test(self, data, fraction = 0.1):
        ''' split data into train and test sets according to the given fraction '''
n = int(round(fraction*len(data)))
random.shuffle(data)
test = data[:n]
train = data[n:]
return train, test
def join_p(self, list):
''' return a joint probability from a collection '''
join = 1.0
for item in list:
join = join*item
return join
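    # Illustration: join_p([0.5, 0.2, 0.1]) = 0.5*0.2*0.1 = 0.01. The
    # classifiers below compare join_h/join_l, the ratio of the two naive
    # Bayes products (each starting from its class prior), and predict 'h'
    # whenever the ratio exceeds 1.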
def random_subset_validation(self, data, headers, fraction = 0.5):
        ''' randomly split the data for validation. `fraction` of each class is held out for testing (default 0.5) '''
l, h = self.split_l_h(data)
train_l, test_l = self.split_train_test(l, fraction)
train_h, test_h = self.split_train_test(h, fraction)
test = test_l + test_h
train = train_l + train_h
params_l = self.get_params_dict(train_l, headers)
params_h = self.get_params_dict(train_h, headers)
prior_l, prior_h = float(len(train_l))/len(train), float(len(train_h))/len(train)
predictions = []
for items in test:
probs_l = [prior_l]
probs_h = [prior_h]
sample_name = items[0]
for i in range(1, len(items)):
var_name = headers[i]
mu_l, sigma_l = params_l[var_name]
mu_h, sigma_h = params_h[var_name]
x = items[i]
prob_l = self.p_normal_cdf(x, mu_l, sigma_l)
prob_h = self.p_normal_cdf(x, mu_h, sigma_h)
probs_l.append(prob_l)
probs_h.append(prob_h)
join_l = self.join_p(probs_l)
join_h = self.join_p(probs_h)
p_ratio = join_h/join_l
sample_class = sample_name[0]
if p_ratio > 1:
predicted_class = 'h'
else:
predicted_class = 'l'
prediction = [sample_name, sample_class, predicted_class, p_ratio]
predictions.append(prediction)
return predictions
def confusion_matrix(self, predictions):
''' returns confusion matrix for the given predictions. input follows that
of random_subset_validation format '''
classes = self.split_l_h(predictions)
c_matrix = [[] for x in range(len(classes))]
for i, items in enumerate(classes):
label = items[0][0][0]
c_matrix[i].append(label)
match = 0
notmatch = 0
for item in items:
predicted_label = item[2]
if label == predicted_label:
match += 1
else:
notmatch += 1
c_matrix[i].append(match)
c_matrix[i].append(notmatch)
#print c_matrix
return c_matrix
def kfold_split(self, data, k = 10):
        ''' split data into k folds '''
fraction = 1.0/k
portion = int(fraction * len(data))
splits = []
for i in range(0, len(data), portion):
split = data[i:i+portion]
splits.append(split)
return splits
def kfold_cross_validation(self, data, headers, k = 10):
''' do k fold validation. default is 10 '''
l, h = self.split_l_h(data)
splits_l = self.kfold_split(l, k)
splits_h = self.kfold_split(h, k)
c_matrices = []
for i in range(len(splits_l)):
test_l, test_h = splits_l[i],splits_h[i]
train_l = []
train_h = []
for i2 in range(len(splits_l)):
if i2 != i:
train_l += splits_l[i2]
train_h += splits_h[i2]
            train = train_l + train_h
            test = test_l + test_h
params_l = self.get_params_dict(train_l, headers)
params_h = self.get_params_dict(train_h, headers)
prior_l, prior_h = float(len(train_l))/len(train), float(len(train_h))/len(train)
predictions = []
for items in test:
probs_l = [prior_l]
probs_h = [prior_h]
sample_name = items[0]
for i in range(1, len(items)):
var_name = headers[i]
mu_l, sigma_l = params_l[var_name]
mu_h, sigma_h = params_h[var_name]
x = items[i]
prob_l = self.p_normal_cdf(x, mu_l, sigma_l)
prob_h = self.p_normal_cdf(x, mu_h, sigma_h)
probs_l.append(prob_l)
probs_h.append(prob_h)
join_l = self.join_p(probs_l)
join_h = self.join_p(probs_h)
p_ratio = join_h/join_l
sample_class = sample_name[0]
if p_ratio > 1:
predicted_class = 'h'
else:
predicted_class = 'l'
prediction = [sample_name, sample_class, predicted_class, p_ratio]
predictions.append(prediction)
c_matrix = self.confusion_matrix(predictions)
c_matrices.append(c_matrix)
ave_c_matrix = [[] for x in range(len(c_matrices[0]))]
for c_matrix in c_matrices:
for i in range(len(ave_c_matrix)):
if len(ave_c_matrix[i]) == 0:
ave_c_matrix[i].append(c_matrix[i][0])
ave_c_matrix[i].append(c_matrix[i][1])
ave_c_matrix[i].append(c_matrix[i][2])
else:
ave_c_matrix[i][1] += c_matrix[i][1]
ave_c_matrix[i][2] += c_matrix[i][2]
for item in ave_c_matrix:
for i in range(1, len(ave_c_matrix)+1):
item[i] = item[i]/float(k)
return ave_c_matrix
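    # Note on the averaging above (illustration): each fold yields a 2x3
    # confusion matrix [[label_l, match, notmatch], [label_h, match, notmatch]];
    # the counts are summed over the k folds and divided by k, so ave_c_matrix
    # holds the mean per-fold confusion counts.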
def kfold_cross_validation2(self, data, headers, k = 10):
        ''' do k fold validation. default is 10. handles size discrepancies between
        the k splits, which sometimes differ due to rounding '''
l, h = self.split_l_h(data)
splits_l = self.kfold_split(l, k)
splits_h = self.kfold_split(h, k)
if len(splits_h) < len(splits_l):
splits_l = splits_l[:-1]
elif len(splits_h) > len(splits_l):
splits_h = splits_h[:-1]
c_matrices = []
for i in range(len(splits_l)):
test_l, test_h = splits_l[i],splits_h[i]
train_l = []
train_h = []
for i2 in range(len(splits_l)):
if i2 != i:
train_l += splits_l[i2]
train_h += splits_h[i2]
train = train_l + train_h
test = test_l + test_h
#test = test_l + test_h
#train = train_l + train_h
params_l = self.get_params_dict(train_l, headers)
params_h = self.get_params_dict(train_h, headers)
prior_l, prior_h = float(len(train_l))/len(train), float(len(train_h))/len(train)
predictions = []
for items in test:
probs_l = [prior_l]
probs_h = [prior_h]
sample_name = items[0]
for i in range(1, len(items)):
var_name = headers[i]
mu_l, sigma_l = params_l[var_name]
mu_h, sigma_h = params_h[var_name]
#print var_name, mu_l, sigma_l, mu_h, sigma_h
x = items[i]
prob_l = self.p_normal_cdf(x, mu_l, sigma_l)
prob_h = self.p_normal_cdf(x, mu_h, sigma_h)
probs_l.append(prob_l)
probs_h.append(prob_h)
join_l = self.join_p(probs_l)
join_h = self.join_p(probs_h)
p_ratio = join_h/join_l
sample_class = sample_name[0]
if p_ratio > 1:
predicted_class = 'h'
else:
predicted_class = 'l'
prediction = [sample_name, sample_class, predicted_class, p_ratio]
predictions.append(prediction)
c_matrix = self.confusion_matrix(predictions)
c_matrices.append(c_matrix)
ave_c_matrix = [[] for x in range(len(c_matrices[0]))]
for c_matrix in c_matrices:
for i in range(len(ave_c_matrix)):
if len(ave_c_matrix[i]) == 0:
ave_c_matrix[i].append(c_matrix[i][0])
ave_c_matrix[i].append(c_matrix[i][1])
ave_c_matrix[i].append(c_matrix[i][2])
else:
ave_c_matrix[i][1] += c_matrix[i][1]
ave_c_matrix[i][2] += c_matrix[i][2]
for item in ave_c_matrix:
for i in range(1, len(ave_c_matrix)+1):
item[i] = item[i]/float(k)
return ave_c_matrix
def kfold_cross_validation3(self, data, headers, k = 10):
        ''' do k fold validation. default is 10. uses a gaussian kde to estimate
        densities. handles size discrepancies between the k splits, which
        sometimes differ due to rounding '''
l, h = self.split_l_h(data)
splits_l = self.kfold_split(l, k)
splits_h = self.kfold_split(h, k)
if len(splits_h) < len(splits_l):
splits_l = splits_l[:-1]
elif len(splits_h) > len(splits_l):
splits_h = splits_h[:-1]
c_matrices = []
for i in range(len(splits_l)):
test_l, test_h = splits_l[i],splits_h[i]
train_l = []
train_h = []
for i2 in range(len(splits_l)):
if i2 != i:
train_l += splits_l[i2]
train_h += splits_h[i2]
train = train_l + train_h
test = test_l + test_h
#test = test_l + test_h
#train = train_l + train_h
kdes_l = self.get_kde_dict(train_l, headers)
kdes_h = self.get_kde_dict(train_h, headers)
# prior_l, prior_h = float(len(train_l))/len(train), float(len(train_h))/len(train)
prior_l, prior_h = 0.5, 0.5
predictions = []
for items in test:
probs_l = [prior_l]
probs_h = [prior_h]
sample_name = items[0]
for i in range(1, len(items)):
var_name = headers[i]
#mu_l, sigma_l = params_l[var_name]
#mu_h, sigma_h = params_h[var_name]
#print var_name, mu_l, sigma_l, mu_h, sigma_h
kde_l, min_l = kdes_l[var_name]
kde_h, min_h = kdes_h[var_name]
x = items[i]
prob_l = kde_l.evaluate(x)
prob_h = kde_h.evaluate(x)
# prob_l = kde_l.integrate_box_1d(min_l, x)
# prob_h = kde_h.integrate_box_1d(min_h, x)
probs_l.append(prob_l)
probs_h.append(prob_h)
join_l = self.join_p(probs_l)
join_h = self.join_p(probs_h)
p_ratio = join_h/join_l
sample_class = sample_name[0]
if p_ratio > 1:
predicted_class = 'h'
else:
predicted_class = 'l'
prediction = [sample_name, sample_class, predicted_class, p_ratio]
predictions.append(prediction)
c_matrix = self.confusion_matrix(predictions)
c_matrices.append(c_matrix)
ave_c_matrix = [[] for x in range(len(c_matrices[0]))]
for c_matrix in c_matrices:
for i in range(len(ave_c_matrix)):
if len(ave_c_matrix[i]) == 0:
ave_c_matrix[i].append(c_matrix[i][0])
ave_c_matrix[i].append(c_matrix[i][1])
ave_c_matrix[i].append(c_matrix[i][2])
else:
ave_c_matrix[i][1] += c_matrix[i][1]
ave_c_matrix[i][2] += c_matrix[i][2]
for item in ave_c_matrix:
for i in range(1, len(ave_c_matrix)+1):
item[i] = item[i]/float(k)
return ave_c_matrix
def tp_fp(self, c_matrix):
        ''' returns true positive and false positive rates from c_matrix. assumes the positive class is h '''
rates = []
for item in c_matrix:
rate = float(item[1])/sum(item[1:])
rates.append(rate)
return rates
def tp_fp2(self, c_matrix):
        ''' returns true positive and false positive rates from c_matrix. assumes the positive class is h '''
rates = []
tp = c_matrix[1][1]
fn = c_matrix[1][2]
tn = c_matrix[0][1]
fp = c_matrix[0][2]
tpr = round(tp/(tp+fn),3)
fpr = round(fp/(fp+tn),3)
rates.append(fpr)
rates.append(tpr)
return rates
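    # Illustration with hypothetical counts: for c_matrix = [['l', 80, 20],
    # ['h', 30, 10]] (each row is [label, match, notmatch]), tp = 30, fn = 10,
    # tn = 80, fp = 20, giving tpr = 30/40 = 0.75 and fpr = 20/100 = 0.2,
    # returned as [fpr, tpr].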
def roc_beta(self, data, headers, beta_low = 0.2, beta_high = 50, skip = 0.2):
        ''' returns tp and fp rates for betas ranging from beta_low to beta_high '''
n = int((beta_high-beta_low)/skip)
params = [round(beta_low + i*skip, 2) for i in range(n)]
rates = []
for param in params:
adasyn_data, adasyn_headers = self.adasyn(data, headers, k = 10 , beta = param)
c_matrix = self.kfold_cross_validation2(adasyn_data, headers)
rate = self.tp_fp2(c_matrix)
rate.append(param)
rates.append(rate)
return rates
def write_roc_beta(self, rates, filename = 'roc_betas'):
contents = ''
rates = sorted(rates)
for rate in rates:
content = ','.join(str(round(x, 4)) for x in rate) + '\n'
contents += content
outname = self.dirs[2] + '/%s.txt' % filename
outfile = open(outname, 'w')
outfile.write(contents)
outfile.close()
def plot_roc(self, rates, figname):
        ''' plot tpr as a function of fpr. this is known as a roc curve '''
fprs = [x[0] for x in rates]
tprs = [x[1] for x in rates]
figpath = self.dirs[0]
figname = figpath + '/%s.pdf' % figname
plt.figure()
plt.scatter(fprs, tprs, alpha = 0.5, edgecolor = 'none', color = 'blue')
diags = [0.1*x for x in range(11)]
plt.plot(diags, diags, alpha = 0.5, color = 'orange')
plt.axis([0, 1, 0,1])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.savefig(figname)
plt.close()
def plot_train_rocs(self):
''' plots receiver operating characteristics (roc) for each train data in
infiles. writes rates to outfiles as well'''
train_datas = self.parse_train_txts()
for train_data in train_datas:
data, headers, name = train_data
clean_data, clean_headers = self.clean_data_continuous(data, headers,250)
beta_low, beta_high = 0.2, 50
rates = self.roc_beta(clean_data, clean_headers, beta_low, beta_high)
filename = 'roc_beta%s_%s' % (beta_high, name)
self.write_roc_beta(rates, filename)
self.plot_roc(rates, filename)
def parse_rates_file(self, rates_file):
        ''' parse a rates file deposited in the outfiles dir '''
rates = open(rates_file).read().splitlines()
rates = [x.split(',') for x in rates]
fprs = [x[0] for x in rates]
tprs = [x[1] for x in rates]
return fprs, tprs
def plot_train_rocs2(self):
''' plots train roc from rates files in outfiles dir '''
rates_files = []
outfiles_path = self.dirs[2]
for root, dirs, files in os.walk(outfiles_path):
for file in files:
if 'roc_beta50' in file:
rates_file = os.path.join(root, file)
rates_files.append(rates_file)
plt.figure()
diags = [0.1*x for x in range(11)]
plt.plot(diags, diags, alpha = 0.5, color = 'orange')
colors = ['b', 'g','r','c','m','y']
color_skip = 1.0/len(rates_files)
color_i = [i*color_skip for i in range(len(rates_files))]
color_map = cm.rainbow(color_i)
for i, rates_file in enumerate(rates_files[:]):
names = self.path2names(rates_file)
filename = names[1]
fprs, tprs = self.parse_rates_file(rates_file)
plt.scatter(fprs, tprs, alpha = 0.9, edgecolor = 'none', label = filename, color = color_map[i])
plt.axis([0, 1, 0, 1])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend(loc = 'lower right', frameon = False, fontsize = 10)
figpath = self.dirs[0] + '/roc_trains.pdf'
plt.savefig(figpath)
#plt.show()
def predict(self, unseens, unseens_headers, beta):
        ''' returns predictions for unseen data. params are trained on continuous
        data, which means the unseen data need to be continuous as well '''
con_data, con_headers = self.clean_data_continuous(self.data, self.headers, 250)
train_data, train_headers = self.adasyn(con_data, con_headers, beta = beta)
train_l, train_h = self.split_l_h(train_data)
params_l = self.get_params_dict(train_l, con_headers)
params_h = self.get_params_dict(train_h, con_headers)
prior_l, prior_h = float(len(train_l))/len(train_data), float(len(train_h))/len(train_data)
#prior_l, prior_h = 0.5, 0.5
predictions = []
for items in unseens:
probs_l = [prior_l]
probs_h = [prior_h]
sample_name = items[0]
for i in range(1, len(items)):
var_name = unseens_headers[i]
mu_l, sigma_l = params_l[var_name]
mu_h, sigma_h = params_h[var_name]
x = items[i]
prob_l = self.p_normal_cdf(x, mu_l, sigma_l)
prob_h = self.p_normal_cdf(x, mu_h, sigma_h)
probs_l.append(prob_l)
probs_h.append(prob_h)
join_l = self.join_p(probs_l)
join_h = self.join_p(probs_h)
p_ratio = join_h/join_l
if p_ratio > 1:
predicted_class = 'h'
else:
predicted_class = 'l'
prediction = [sample_name, predicted_class, round(p_ratio, 4)]
predictions.append(prediction)
predictions = sorted(predictions, key = lambda item: item[-1], reverse = True)
return predictions
def file2predictions(self, filepath, beta, ranker):
''' returns predictions for a given file. only consider pocket P_1'''
con_data, con_headers = self.clean_data_continuous(self.data, self.headers,250)
train_data, train_headers = self.adasyn(con_data, con_headers, beta = beta)
train_l, train_h = self.split_l_h(train_data)
params_l = self.get_params_dict(train_l, con_headers)
params_h = self.get_params_dict(train_h, con_headers)
prior_l, prior_h = float(len(train_l))/len(train_data), float(len(train_h))/len(train_data)
#prior_l, prior_h = 0.5, 0.5
unseens, unseens_headers = self.parse_desc_merged_txt(filepath)
unseens, unseens_headers = self.get_pocket2(unseens, unseens_headers)
predictions = []
for items in unseens:
probs_l = [prior_l]
probs_h = [prior_h]
sample_name = items[0]
for i in range(1, len(items)):
var_name = unseens_headers[i]
x = items[i]
if var_name in params_l and x != 0:
mu_l, sigma_l = params_l[var_name]
mu_h, sigma_h = params_h[var_name]
#x = items[i]
prob_l = self.p_normal_cdf(x, mu_l, sigma_l)
prob_h = self.p_normal_cdf(x, mu_h, sigma_h)
probs_l.append(prob_l)
probs_h.append(prob_h)
join_l = self.join_p(probs_l)
join_h = self.join_p(probs_h)
p_ratio = round(join_h/join_l,4)
wp_ratio = round(p_ratio * join_h, 4)
if join_h == 0:
join_h = 1e-14
iwp_ratio = round(p_ratio/join_h, 4)
if p_ratio > 1:
predicted_class = 'h'
else:
predicted_class = 'l'
#prediction = [sample_name, predicted_class, p_ratio, wp_ratio, iwp_ratio]
prediction = [sample_name, predicted_class, p_ratio, wp_ratio]
predictions.append(prediction)
if ranker == 'wp':
predictions = sorted(predictions, key = lambda item: item[-1], reverse = True)
else:
predictions = sorted(predictions, key = lambda item: item[-2], reverse = True)
return predictions, ranker
def file2predictions2(self, filepath, beta, ranker):
''' Uses kde, returns predictions for a given file. only consider pocket P_1'''
con_data, con_headers = self.clean_data_continuous(self.data, self.headers,250)
train_data, train_headers = self.adasyn(con_data, con_headers, beta = beta)
train_l, train_h = self.split_l_h(train_data)
kdes_l = self.get_kde_dict(train_l, con_headers)
kdes_h = self.get_kde_dict(train_h, con_headers)
prior_l, prior_h = float(len(train_l))/len(train_data), float(len(train_h))/len(train_data)
#prior_l, prior_h = 0.5, 0.5
unseens, unseens_headers = self.parse_desc_merged_txt(filepath)
unseens, unseens_headers = self.get_pocket2(unseens, unseens_headers)
predictions = []
for items in unseens:
probs_l = [prior_l]
probs_h = [prior_h]
sample_name = items[0]
for i in range(1, len(items)):
var_name = unseens_headers[i]
if var_name in kdes_l:
x = items[i]
kde_l = kdes_l[var_name][0]
kde_h = kdes_h[var_name][0]
prob_l = kde_l(x)
prob_h = kde_h(x)
probs_l.append(prob_l)
probs_h.append(prob_h)
join_l = self.join_p(probs_l)
join_h = self.join_p(probs_h)
p_ratio = round(join_h/join_l,4)
wp_ratio = round(p_ratio * join_h, 4)
if join_h == 0:
join_h = 1e-14
iwp_ratio = round(p_ratio/join_h, 4)
if p_ratio > 1:
predicted_class = 'h'
else:
predicted_class = 'l'
#prediction = [sample_name, predicted_class, p_ratio, wp_ratio, iwp_ratio]
prediction = [sample_name, predicted_class, p_ratio, wp_ratio]
predictions.append(prediction)
if ranker == 'wp':
predictions = sorted(predictions, key = lambda item: item[-1], reverse = True)
else:
predictions = sorted(predictions, key = lambda item: item[-2], reverse = True)
return predictions, ranker
def top_n_predicted(self, predictions, n):
''' writes outfile for top n predictions from predictions list '''
top_n = predictions[:n]
contents = ''
contents += 'name,predicted_class,p_ratio \n'
for prediction in top_n:
content = ','.join(str(item) for item in prediction) + '\n'
contents += content
outdir = self.dirs[2]
outname = outdir + '/top%s_predicted.txt' % n
outfile = open(outname, 'w')
outfile.write(contents)
outfile.close()
def file2top_predicted(self, filepath, n, beta, ranker):
        ''' writes outfile for top n predictions from a given file '''
predictions, ranker = self.file2predictions(filepath, beta, ranker)
names = self.path2names(filepath)
top_n = predictions[:n]
contents = ''
#contents += 'name,predicted_class,p_ratio,wp_ratio,iwp_ratio\n'
contents += 'name,predicted_class,p_ratio,wp_ratio\n'
for prediction in top_n:
content = ','.join(str(item) for item in prediction) + '\n'
contents += content
outdir = self.dirs[2]
outname = outdir + '/%s_top%s_predicted_%s_%s.txt' % (names[1], n, ranker, beta)
outfile = open(outname, 'w')
outfile.write(contents)
outfile.close()
def file2top_predicted2(self, filepath, n, beta, ranker, outdir):
''' writes outfile for top n predictions from a given file '''
predictions, ranker = self.file2predictions(filepath, beta, ranker)
names = self.path2names(filepath)
top_n = predictions[:n]
contents = ''
#contents += 'name,predicted_class,p_ratio,wp_ratio,iwp_ratio\n'
contents += 'name,predicted_class,p_ratio,wp_ratio\n'
for prediction in top_n:
content = ','.join(str(item) for item in prediction) + '\n'
contents += content
#outdir = self.dirs[2]
outname = outdir + '/%s_top%s_predicted_%s_%s.txt' % (names[1], n, ranker, beta)
outfile = open(outname, 'w')
outfile.write(contents)
outfile.close()
### publication stuff ##
def plot_hist_mult(self, data, headers, name):
''' plots multiple histograms on a single plot '''
l_data, h_data = self.split_l_h(data)
cl_datas = self.rows2columns(l_data, headers)
ch_datas = self.rows2columns(h_data, headers)
cl_data = cl_datas[0]
ch_data = ch_datas[0]
for i in range(1, len(headers))[:]:
fignum = i
rownum, colnum = 5,3
plt.subplot(rownum, colnum,fignum)
plt.hist(cl_data[i], bins = 50, alpha = 0.5, edgecolor = 'none', label='l')
plt.hist(ch_data[i], bins = 50, alpha = 0.5, edgecolor = 'none', label='h')
plt.tick_params(axis = 'both', labelsize = 6)
#plt.legend()
plt.locator_params(axis='x', nbins=4)
plt.locator_params(axis='y', nbins=4)
title = headers[i]
plt.title(title, fontsize=10)
plt.xlabel('values',fontsize=6)
plt.ylabel('frequency',fontsize=6)
plt.tight_layout()
figname = name + '_mult_hist.pdf'
figpath = self.dirs[0] + '/' + figname
plt.savefig(figpath)
plt.close()
def tab_cmatrix(self,cmatrix, name):
'''tabulates a confusion matrix '''
tp,fp = cmatrix[1][1], cmatrix[1][2]
tn,fn = cmatrix[0][1], cmatrix[0][2]
headers = ['Predicted high', 'Predicted low']
cmatrix2 = [['True high',tp,fp],['True low',fn,tn]]
tablecontent = tabulate(cmatrix2, headers, tablefmt='latex')
tablename = name + '_cmatrix.tex'
tablepath = self.dirs[1] + '/' + tablename
tableout = open(tablepath, 'w')
tableout.write(tablecontent)
def tab_fp_tp(self,rates, name):
'''tabulates TPR and FPR rates; rates are in the format [fpr, tpr] '''
headers = ['FPR', 'TPR']
rates = [rates, ['','']]
tablecontent = tabulate(rates, headers, tablefmt='latex')
tablename = name + '_fp_tp.tex'
tablepath = self.dirs[1] + '/' + tablename
tableout = open(tablepath, 'w')
tableout.write(tablecontent)
def plot_train3_roc_rates(self):
''' plots a multi-curve ROC scatter from the train3 FPR/TPR (rates) files'''
rates_paths = self.find_files_from_path('roc_beta50_train3', self.dirs[2])
fp_tp_list = []
plotnames = []
for rates_path in rates_paths:
names = self.path2names(rates_path)
filenames = names[-1].split('_')
plotname = filenames[-1][:-4]
rates = self.parse_rates_file(rates_path)
fp_tp_list.append(rates)
plotnames.append(plotname)
color_numbers = len(plotnames)
color_skip = 1.0/color_numbers
color_indices = [i*color_skip for i in range(color_numbers)]
color_map = cm.rainbow(color_indices)
for i,plotname in enumerate(plotnames):
fp, tp = fp_tp_list[i]
color_index = color_map[i]
#plt.scatter(fp, tp, alpha = 0.5, label=plotname, edgecolor='none', color=color_index)
plt.scatter(fp, tp, alpha = 0.75, label=plotname, edgecolor='none', color=color_index)
plt.plot([0,1],[0,1], color = 'orange')
plt.axis([0,1,0,1])
plt.legend(frameon=False, loc='lower right', fontsize=10)
plt.xlabel('FPR')
plt.ylabel('TPR')
figname = 'roc_mult_scatter2.pdf'
figpath = self.dirs[0] + '/' + figname
plt.savefig(figpath)
def ranker_test(self):
''' plots the distribution of top-10 accuracy for each ranker at the given test betas'''
test_betas = [0.65, 6.5]
rankers = ['p', 'wp']
iters = range(100)
ranker_predictions = {}
for ranker in rankers:
beta_results = []
means = []
sds = []
for beta in test_betas:
beta_result = []
for i in iters:
#predictions, ranker = self.file2predictions('/Users/rahmadakbar/uc/enri/enri_rc9/infiles/train3.txt',beta,ranker)
predictions, ranker = self.file2predictions('/Users/rahmadakbar/uc/enri/enri_rc9/infiles/hddata.tsv',beta,ranker)
top10 = predictions[:10]
hcount = 0.0
for item in top10:
if item[0][0] == 'h':
hcount += 1
fraction = hcount/10
beta_result.append(fraction)
mean = self.mean(beta_result)
sd = self.sd(beta_result)
beta_results.append(beta_result)
means.append(mean)
sds.append(sd)
ranker_predictions[ranker] = beta_results, means, sds, test_betas
for i, ranker in enumerate(ranker_predictions):
fignum = i
rownum, colnum = 1,2
plt.subplot(rownum,colnum,fignum)
fractions, means, sds, betas = ranker_predictions[ranker]
label0 = 'beta %s, mean %s, sd %s' % (betas[0], round(means[0],3), round(sds[0],3))
label1 = 'beta %s, mean %s, sd %s' % (betas[1], round(means[1],3), round(sds[1],3))
plt.axis([0,100,0,100])
x1,x2 = np.array(fractions[0])*100, np.array(fractions[1])*100
plt.hist(x1, bins=25, edgecolor='none', alpha=0.5, label=label0)
plt.hist(x2, bins=25, edgecolor='none', alpha=0.5, label=label1)
plt.legend(frameon=False, fontsize = 10)
plt.title(ranker)
plt.xlabel('percent correct in top 10')
plt.ylabel('frequency')
plt.tight_layout()
#plotname = 'rankers_hist_train3.pdf'
plotname = 'rankers_hist_train3_hd.pdf'
plotpath = self.dirs[0] + '/' + plotname
plt.savefig(plotpath)
plt.close()
os.system('open %s' % plotpath)
def ranker_test_kde(self):
''' plots the distribution of top-10 accuracy for each ranker at the given test betas (KDE-based predictions)'''
test_betas = [0.65, 6.5]
rankers = ['p', 'wp']
iters = range(100)
ranker_predictions = {}
for ranker in rankers:
beta_results = []
means = []
sds = []
for beta in test_betas:
beta_result = []
for i in iters:
predictions, ranker = self.file2predictions2('/Users/rahmadakbar/uc/enri/enri_rc9/infiles/train3.txt',beta,ranker)
top10 = predictions[:10]
hcount = 0.0
for item in top10:
if item[0][0] == 'h':
hcount += 1
fraction = hcount/10
beta_result.append(fraction)
mean = self.mean(beta_result)
sd = self.sd(beta_result)
beta_results.append(beta_result)
means.append(mean)
sds.append(sd)
ranker_predictions[ranker] = beta_results, means, sds, test_betas
for i, ranker in enumerate(ranker_predictions):
fignum = i
rownum, colnum = 1,2
plt.subplot(rownum,colnum,fignum)
fractions, means, sds, betas = ranker_predictions[ranker]
label0 = 'beta %s, mean %s, sd %s' % (betas[0], round(means[0],3), round(sds[0],3))
label1 = 'beta %s, mean %s, sd %s' % (betas[1], round(means[1],3), round(sds[1],3))
plt.axis([0.2,1,0,100])
plt.hist(fractions[0], bins=30, edgecolor='none', alpha=0.5, label=label0)
plt.hist(fractions[1], bins=30, edgecolor='none', alpha=0.5, label=label1)
plt.legend(frameon=False, fontsize = 10)
plt.title(ranker)
plt.xlabel('% correct in top 10')
plt.tight_layout()
plotname = 'rankers_train3kde_hist.pdf'
plotpath = self.dirs[0] + '/' + plotname
plt.savefig(plotpath)
plt.close()
def parse_tsv(self, filepath):
''' parses tsv file from a given path '''
contents = open(filepath).read().splitlines()
headers = contents[0].split('\t')
newcontents = []
for item in contents[1:]:
newcontent = item.split('\t')
newcontents.append(newcontent)
return newcontents, headers
def tab_ef(self, filepath):
''' tabulates ef_table.tsv'''
data, headers = self.parse_tsv(filepath)
table = tabulate(data, headers, tablefmt='latex')
path, name, name_ext = self.path2names(filepath)
tablepath = path + '/' + '%s.tex' % name
tablefile = open(tablepath, 'w')
tablefile.write(table)
tablefile.close()
def pub_stuff(self):
''' do stuff for the publication'''
clean_data, clean_headers = self.clean_data_continuous(self.pocket1, self.pocket1_headers, 250)
self.plot_hist_mult(clean_data, clean_headers, 'train3_2')
# predictions = self.predict(clean_data, clean_headers,0.5)
# orig_data, orig_headers = self.adasyn(clean_data, clean_headers,10,0)
# cmatrix1 = self.kfold_cross_validation2(orig_data, orig_headers)
# balance_data, balance_headers = self.adasyn(clean_data, clean_headers,10,0.65)
# cmatrix2 = self.kfold_cross_validation2(balance_data, balance_headers)
# rates1, rates2 = self.tp_fp2(cmatrix1), self.tp_fp2(cmatrix2)
# print rates1
# self.tab_cmatrix(cmatrix1, 'train3_orig')
# self.tab_cmatrix(cmatrix2, 'train3_balanced')
# self.tab_fp_tp(rates1, 'train3_orig')
# self.tab_fp_tp(rates2, 'train3_balanced')
# self.plot_hist_mult(balance_data, balance_headers, 'train3_balanced2')
# self.plot_train3_roc_rates()
# self.ranker_test()
# l, h = self.split_l_h(self.pocket1)
# print len(l), len(h)
# print len(self.pocket1)
# self.tab_ef('/Users/rahmadakbar/uc/enri/enri_write/tables/ef_mmm.tsv')
#### vh feedbacks ####
def plot_cds(self, cds1, cds2, header):
''' plots cumulative density values from gkde '''
plt.plot([item[0] for item in cds1], [item[1] for item in cds1], label = 'l', color = 'blue')
plt.plot([item[0] for item in cds2], [item[1] for item in cds2], label = 'h', color = 'green')
plt.title(header, fontsize = 10)
plt.locator_params(nbins=6)
plt.tick_params(labelsize = 6)
def kstest_twosamples(self, sample1, sample2, header):
''' Two-sample KS test via KDE; returns the maximum D statistic, all (D, x) pairs, and the two cumulative densities'''
min1, min2 = min(sample1), min(sample2)
gkde1 = scstats.gaussian_kde(sample1) #pdf estimates
gkde2 = scstats.gaussian_kde(sample2) #pdf estimates
#self.plot_cfs(cf1,cf2, header)
Ds = []
cd1s = [] #cumulative densities
cd2s = [] #cumulative densities
if len(sample1) >= len(sample2):
for item in sample1:
cd1 = gkde1.integrate_box_1d(min1,item)
cd2 = gkde2.integrate_box_1d(min2,item)
D = abs(cd1-cd2)
Ds.append((D,item))
cd1s.append((item, cd1))
cd2s.append((item, cd2))
else:
for item in sample2:
cd1 = gkde1.integrate_box_1d(min1,item)
cd2 = gkde2.integrate_box_1d(min2,item)
D = abs(cd1-cd2)
Ds.append((D,item))
cd1s.append((item, cd1))
cd2s.append((item, cd2))
#self.plot_cds(cd1s,cd2s, header)
maxD = max(Ds)
return maxD,Ds, cd1s, cd2s
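# Illustrative cross-check (not part of the original class): the D statistic
# above is a KDE-smoothed analogue of the classical two-sample
# Kolmogorov-Smirnov statistic.  A standalone sanity check against the
# empirical version in scipy (assumed available, as scipy is already used in
# this module) could look like this; the sample sizes and shift are made up.
def ks_crosscheck_example():
    import numpy as np
    from scipy import stats
    rng = np.random.RandomState(0)
    s1 = rng.normal(0.0, 1.0, size=200)
    s2 = rng.normal(0.5, 1.0, size=200)
    D_emp, pval = stats.ks_2samp(s1, s2)
    # D_emp should be of the same order as maxD[0] returned by
    # kstest_twosamples(s1, s2, 'example') for the same two samples.
    return D_emp, pval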
def plot_cd_mult(self, data, headers, name):
''' plots multiple density plots on a single plot '''
l, h = self.split_l_h(data)
l_cols, l_headers = self.rows2columns(l, headers)
h_cols, h_headers = self.rows2columns(h, headers)
nrows, ncols = 5,3
sum_maxD = 0
for i in range(2, len(l_cols)):
maxD, Ds, cd1s, cd2s = self.kstest_twosamples(l_cols[i], h_cols[i], headers[i])
print maxD, headers[i]
sum_maxD += maxD[0]
plot_number = i
cd1s, cd2s = sorted(cd1s), sorted(cd2s)
plt.subplot(nrows, ncols, plot_number)
self.plot_cds(cd1s, cd2s, headers[i])
print sum_maxD
plt.tight_layout()
figname = name + '_mult_plot.pdf'
figpath = self.dirs[0] + '/' + figname
plt.savefig(figpath)
plt.close()
def tabulate_maxD(self, data, headers, name):
''' tabulates the maximum KS D statistic and p-value for each feature '''
l, h = self.split_l_h(data)
l_cols, l_headers = self.rows2columns(l, headers)
h_cols, h_headers = self.rows2columns(h, headers)
maxDs = []
for i in range(1, len(l_cols)):
maxD,Ds, cd1s, cd2s = self.kstest_twosamples(l_cols[i], h_cols[i], headers[i])
featurename = headers[i]
Dvals = [item[0] for item in Ds]
D, x = maxD[0], maxD[1]
Ds = [item[0] for item in Ds]
mu,sd = np.mean(Ds), np.std(Ds)
#p = self.p_normal_cdf(x,0,sd)
p1 = (1 + math.erf((x)/sd/math.sqrt(2)))/float(2)-1e-14
p1 = (1-p1)
p = (1 + math.erf((mu)/sd/math.sqrt(2)))/float(2)-1e-14
pval = format(1-p,'.3g')
p2 = stats.ttest_1samp(Dvals,0)
print pval, p1, p2
D = round(D,3)
maxDs.append([featurename, D, pval])
maxDs = sorted(maxDs, key=lambda item: item[1],reverse = True)
table_headers = ['Feature', 'D', 'Pval']
tablecontent = tabulate(maxDs, table_headers, tablefmt='latex')
tablename = name + '_Dstatistics.tex'
tablepath = self.dirs[1] + '/' + tablename
tableout = open(tablepath, 'w')
tableout.write(tablecontent)
def hdstats_data(self, data, headers):
''' selects only the high D-statistic features, returns only the selected
features as new data '''
wanted_headers = ['name', 'poc_cov', 'ell c/a', 'hydrophobicity', 'simpleScore', 'drugScore']
#wanted_headers = ['name', 'surface', 'lid', 'hull','ell c/a', 'hydrophobicity', 'simpleScore', 'drugScore']
wanted_indices = []
for i, header in enumerate(headers):
if header in wanted_headers:
wanted_indices.append(i)
hdstats_data = []
for sample in data:
#new_item = [item[i] for i,item in enumerate(item) if i in wanted_indices]
new_sample = []
for i,item in enumerate(sample):
if i in wanted_indices:
new_sample.append(sample[i])
hdstats_data.append(new_sample)
return hdstats_data, wanted_headers
def write_tsv(self, data, headers, name):
''' writes a tab separated values file from the given data and headers'''
content = ''
content += '\t'.join(headers) + '\n'
for item in data:
new_item = '\t'.join([str(x) for x in item]) + '\n'
content += new_item
outname = name + '.tsv'
outpath = self.dirs[2] + '/' + outname
outfile = open(outpath, 'w')
outfile.write(content)
outfile.close()
def vh_feedbacks(self):
''' do stuff from vh feedbacks'''
clean_data, clean_headers = self.clean_data_continuous(self.pocket1, self.pocket1_headers, 250)
l, h = self.split_l_h(clean_data)
l_cols, l_headers = self.rows2columns(l, clean_headers)
h_cols, h_headers = self.rows2columns(h, clean_headers)
# print self.kstest_twosamples(l_cols[13], h_cols[13], l_headers[13])[0]
# self.plot_cd_mult(clean_data, clean_headers, 'cds_train3')
# balance_data, balance_headers = self.adasyn(clean_data, clean_headers,10,0.65)
# self.plot_cd_mult(balance_data, balance_headers, 'cds_train3_balanced')
# self.tabulate_maxD(clean_data, clean_headers, 'train3')
# self.tabulate_maxD(balance_data, balance_headers, 'train3_balance')
# hdstats_data, hdstats_headers = self.hdstats_data(clean_data, clean_headers)
# hdstatsbalance_data, hdstatsbalance_headers = self.adasyn(hdstats_data, hdstats_headers,10,0.65)
# cmatrix_hdstats = self.kfold_cross_validation3(hdstats_data, hdstats_headers)
# cmatrix_train3kde = self.kfold_cross_validation3(clean_data, clean_headers)
# cmatrix_hdstatsbalance = self.kfold_cross_validation3(hdstatsbalance_data, hdstatsbalance_headers)
# self.tab_cmatrix(cmatrix_hdstats, 'train3_hdstats_kde')
# self.tab_cmatrix(cmatrix_hdstatsbalance, 'train3_hdstatsbalance_kde')
# rates_hdstats, rates_hdstatsbalance = self.tp_fp2(cmatrix_hdstats), self.tp_fp2(cmatrix_hdstatsbalance)
# self.tab_fp_tp(rates_hdstats, 'hdstats_kde')
# self.tab_fp_tp(rates_hdstatsbalance, 'hdstatsbalance_kde')
# rates_train3kde = self.tp_fp2(cmatrix_train3kde)
# self.tab_fp_tp(rates_train3kde, 'train3kde')
# cmatrix_train3balancekde = self.kfold_cross_validation3(balance_data, balance_headers)
# rates_train3balancekde = self.tp_fp2(cmatrix_train3balancekde)
# self.tab_fp_tp(rates_train3balancekde, 'train3balancekde')
# self.write_tsv(hdstats_data, hdstats_headers, 'hdstat_data')
# self.ranker_test()
# self.get_kde_dict(clean_data, clean_headers)
### miscellaneous functions ###
def select_adescriptor(self, filepath, pocket, variable):
'''selects and outputs feature according to the given arguments '''
variable_index = ''
data, headers = self.parse_train_txt2(filepath)
coldata, colheaders = self.rows2columns(data,headers)
select_data = [coldata[0]]
select_headers = [colheaders[0]]
for i,header in enumerate(colheaders):
if variable == header:
select_data.append(coldata[i])
select_headers.append(header)
break
path, name, name_ext = self.path2names(filepath)
outpath = path + '/' + name + '_' + variable + '.txt'
outfile = open(outpath, 'w')
content = select_headers[0] + '\t' + select_headers[1] + '\n'
for i, item in enumerate(select_data[0]):
itempocket = '_'.join(item.split('_')[-2:])
if pocket == itempocket:
content += item + '\t' + str(select_data[1][i]) + '\n'
outfile.write(content)
outfile.close()
# vh feedbacks2#
def write_latextable(self,tcontent,theaders, tname):
'''
Writes table in latex format to tables dir
'''
tpath = self.dirs[1] + '/%s.tex' % tname
filecontent = tabulate(tcontent,theaders, tablefmt = 'latex')
tablefile = open(tpath,'w')
tablefile.write(filecontent)
tablefile.close()
def check_traindata(self):
'''
Checks train data, how many proteins, how many conformations in each class.
'''
trd, trh = self.parse_train_txt()
cd, ch = self.clean_data_continuous(trd, trh,250)
pdb = {}
for datum in cd:
name = datum[0]
nameparts = name.split('_')
#pdbid = '_'.join(nameparts[1:3])
pdbid = nameparts[1]
label = nameparts[0]
if pdbid not in pdb:
pdb[pdbid] = [0,0] # init dict, values [h,l]
if label == 'h':
pdb[pdbid][0] += 1
elif label == 'l':
pdb[pdbid][1] += 1
total_l = 0
total_h = 0
theaders = ['pdbid','high', 'low']
tcontents = []
for pdbid in pdb:
values = pdb[pdbid]
total_l += values[1]
total_h += values[0]
tcontent = [pdbid] + values
tcontents.append(tcontent)
tcontents.append(['sum', total_h, total_l])
filename = 'train_h_l'
print pdb
print len(pdb)
print total_l + total_h
self.write_latextable(tcontents, theaders,filename)
def feedbacks2(self):
'''
'''
self.check_traindata()
#end vh feedbacks2#
#vh feedbacks3#
def feedbacks3_wf(self):
'''
Workflow for feedback 3
'''
data, headers = self.clean_data_continuous(self.pocket1, self.pocket1_headers, 250)
bdata, bheaders = self.adasyn(data, headers,10,0.65)
#self.plot_hist_mult(data, headers,'train3')
#self.plot_hist_mult(bdata, bheaders,'train3_balanced')
#hddata, hdheaders = self.hdstats_data(data, headers)
#self.tabulate_maxD(data, headers, 'train3')
self.tabulate_maxD(bdata, bheaders, 'train3_balanced')
#self.ranker_test()
#self.ranker_test() #hddata
#end vh feedbacks3#
####### usage examples and development notes ######
e = Enri()
#print e.find_files('.pdb')
#e.rename_files('.pdb')
#e.pdb2desc('.pdb')
#e.name2firstcol('desc.txt')
#e.merge_edt()
#e.file2top_predicted('/Users/rahmadakbar/uc/enri/desc_merged/3l3x_desc_merged.txt',10,6.5,'wp')
#e.pub_stuff()
#e.vh_feedbacks()
#e.plot_adasyn(10,0.65)
#e.feedbacks2()
e.feedbacks3_wf()
|
fibonaccirabbits/enri
|
enri.py
|
Python
|
mit
| 69,117
|
[
"Gaussian"
] |
b9395e4fc2e2c8ca1f07928b4ec8287726d3b71a18e0f5db21dc32bc076491b0
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# This program is part of 'MOOSE', the
# Messaging Object Oriented Simulation Environment.
# Copyright (C) 2013 Upinder S. Bhalla. and NCBS
# It is made available under the terms of the
# GNU Lesser General Public License version 2.1
# See the file COPYING.LIB for the full notice.
# Monday 17 September 2018 01:49:30 PM IST
# - Converted to a test script
import math
import numpy as np
import moose
print( "[INFO ] Using moose from %s" % moose.__file__ )
def test_SS_solver():
compartment = makeModel()
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.path = "/model/compartment/##"
state = moose.SteadyState( '/model/compartment/state' )
moose.reinit()
state.stoich = stoich
state.convergenceCriterion = 1e-6
moose.seed( 111 ) # Used when generating the samples in state space
b = moose.element( '/model/compartment/b' )
a = moose.element( '/model/compartment/a' )
c = moose.element( '/model/compartment/c' )
a.concInit = 0.1
deltaA = 0.002
num = 150
avec = []
bvec = []
moose.reinit()
# Now go up.
for i in range( 0, num ):
moose.start( 1.0 ) # Run the model for 1 second.
state.settle() # This function finds the steady states.
avec.append( a.conc )
bvec.append( b.conc )
a.concInit += deltaA
aa, bb = avec, bvec
got = np.mean(aa), np.std(aa)
expected = 0.24899, 0.08660
assert np.isclose(got, expected, atol = 1e-4).all(), "Got %s, expected %s" % (got, expected)
print( "[INFO ] Test 1 PASSED" )
# Now go down.
avec = []
bvec = []
for i in range( 0, num ):
moose.start( 1.0 ) # Run the model for 1 second.
state.settle() # This function finds the steady states.
avec.append( a.conc )
bvec.append( b.conc )
a.concInit -= deltaA
aa, bb = avec, bvec
got = np.mean(aa), np.std(aa)
expected = 0.251, 0.0866
assert np.isclose(got, expected, atol = 1e-4).all(), "Got %s, expected %s" % (got, expected)
print( "[INFO ] Test 2 PASSED" )
# Now aim for the middle. We do this by judiciously choosing a
# start point that should be closer to the unstable fixed point.
avec = []
bvec = []
a.concInit = 0.28
b.conc = 0.15
for i in range( 0, 65 ):
moose.start( 1.0 ) # Run the model for 1 second.
state.settle() # This function finds the steady states.
avec.append( a.conc )
bvec.append( b.conc )
a.concInit -= deltaA
aa, bb = avec, bvec
got = np.mean(aa), np.std(aa)
expected = 0.216, 0.03752
assert np.isclose(got, expected, atol = 1e-4).all(), "Got %s, expected %s" % (got, expected)
print( "[INFO ] Test 3 PASSED" )
def makeModel():
""" This function creates a bistable reaction system using explicit
MOOSE calls rather than loading it from a file.
The reaction is::
a ---b---> 2b # b catalyzes a to form more of b.
2b ---c---> a # c catalyzes b to form a.
a <======> 2b # a interconverts to b.
"""
# create container for model
model = moose.Neutral( 'model' )
compartment = moose.CubeMesh( '/model/compartment' )
compartment.volume = 1e-15
# the mesh is created automatically by the compartment
mesh = moose.element( '/model/compartment/mesh' )
# create molecules and reactions
a = moose.BufPool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
c = moose.Pool( '/model/compartment/c' )
enz1 = moose.Enz( '/model/compartment/b/enz1' )
enz2 = moose.Enz( '/model/compartment/c/enz2' )
cplx1 = moose.Pool( '/model/compartment/b/enz1/cplx' )
cplx2 = moose.Pool( '/model/compartment/c/enz2/cplx' )
reac = moose.Reac( '/model/compartment/reac' )
# connect them up for reactions
moose.connect( enz1, 'sub', a, 'reac' )
moose.connect( enz1, 'prd', b, 'reac' )
moose.connect( enz1, 'prd', b, 'reac' ) # Note 2 molecules of b.
moose.connect( enz1, 'enz', b, 'reac' )
moose.connect( enz1, 'cplx', cplx1, 'reac' )
moose.connect( enz2, 'sub', b, 'reac' )
moose.connect( enz2, 'sub', b, 'reac' ) # Note 2 molecules of b.
moose.connect( enz2, 'prd', a, 'reac' )
moose.connect( enz2, 'enz', c, 'reac' )
moose.connect( enz2, 'cplx', cplx2, 'reac' )
moose.connect( reac, 'sub', a, 'reac' )
moose.connect( reac, 'prd', b, 'reac' )
moose.connect( reac, 'prd', b, 'reac' ) # Note 2 order in b.
# Assign parameters
a.concInit = 1
b.concInit = 0
c.concInit = 0.01
enz1.kcat = 0.4
enz1.Km = 4
enz2.kcat = 0.6
enz2.Km = 0.01
reac.Kf = 0.001
reac.Kb = 0.01
return compartment
def main():
test_SS_solver()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
BhallaLab/moose-core
|
tests/core/test_steady_state_solver.py
|
Python
|
gpl-3.0
| 5,039
|
[
"MOOSE"
] |
c11ffb7cfc7df1979ea9f1d6572d2dfa5f81b1450685c2aeef918a9b23398e47
|
#
# @file TestCVTerms_newSetters.py
# @brief CVTerms unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id: TestCVTerms_newSetters.py 11441 2010-07-09 02:22:23Z mhucka $
# $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbml/trunk/libsbml/src/bindings/python/test/annotation/TestCVTerms_newSetters.py $
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestCVTerms_newSetters.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestCVTerms_newSetters(unittest.TestCase):
def test_CVTerm_addResource(self):
term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
resource = "GO6666";
self.assert_( term != None )
self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
i = term.addResource( "")
self.assert_( i == libsbml.LIBSBML_OPERATION_FAILED )
xa = term.getResources()
self.assert_( xa.getLength() == 0 )
i = term.addResource(resource)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
xa = term.getResources()
self.assert_( xa.getLength() == 1 )
self.assert_(( "rdf:resource" == xa.getName(0) ))
self.assert_(( "GO6666" == xa.getValue(0) ))
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def test_CVTerm_removeResource(self):
term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
resource = "GO6666";
self.assert_( term != None )
self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
term.addResource(resource)
xa = term.getResources()
self.assert_( xa.getLength() == 1 )
i = term.removeResource( "CCC")
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
xa = term.getResources()
self.assert_( xa.getLength() == 1 )
i = term.removeResource(resource)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
xa = term.getResources()
self.assert_( xa.getLength() == 0 )
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def test_CVTerm_setBiolQualifierType(self):
term = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
self.assert_( term != None )
self.assert_( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
i = term.setBiologicalQualifierType(libsbml.BQB_IS)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_IS )
self.assert_( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
i = term.setQualifierType(libsbml.MODEL_QUALIFIER)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
self.assert_( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
i = term.setBiologicalQualifierType(libsbml.BQB_IS)
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
self.assert_( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def test_CVTerm_setModelQualifierType(self):
term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
self.assert_( term != None )
self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
self.assert_( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
i = term.setModelQualifierType(libsbml.BQM_IS)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
self.assert_( term.getModelQualifierType() == libsbml.BQM_IS )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
i = term.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
i = term.setModelQualifierType(libsbml.BQM_IS)
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assert_( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
self.assert_( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestCVTerms_newSetters))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/annotation/TestCVTerms_newSetters.py
|
Python
|
gpl-3.0
| 6,280
|
[
"VisIt"
] |
669cf1b241c29c9b35e6bd0d5a218fddd793d736d389aafe71343593d94ed821
|
'''
'''
from __future__ import print_function
import shutil
from os.path import dirname, exists, join, realpath, relpath
import os, re, subprocess, sys, time
import versioneer
# provide fallbacks for highlights in case colorama is not installed
try:
import colorama
from colorama import Fore, Style
def bright(text): return "%s%s%s" % (Style.BRIGHT, text, Style.RESET_ALL)
def dim(text): return "%s%s%s" % (Style.DIM, text, Style.RESET_ALL)
def red(text): return "%s%s%s" % (Fore.RED, text, Style.RESET_ALL)
def green(text): return "%s%s%s" % (Fore.GREEN, text, Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (Fore.YELLOW, text, Style.RESET_ALL)
sys.platform == "win32" and colorama.init()
except ImportError:
def bright(text): return text
def dim(text): return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
# some functions prompt for user input, handle input vs raw_input (py2 vs py3)
if sys.version_info[0] < 3:
input = raw_input # NOQA
# -----------------------------------------------------------------------------
# Module global variables
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
# -----------------------------------------------------------------------------
# Helpers for command line operations
# -----------------------------------------------------------------------------
def show_bokehjs(bokehjs_action, develop=False):
''' Print a useful report after setuptools output describing where and how
BokehJS is installed.
Args:
bokehjs_action (str) : one of 'built', 'installed', or 'packaged'
how (or if) BokehJS was installed into the python source tree
develop (bool, optional) :
whether the command was for "develop" mode (default: False)
Returns:
None
'''
print()
if develop:
print("Installed Bokeh for DEVELOPMENT:")
else:
print("Installed Bokeh:")
if bokehjs_action in ['built', 'installed']:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if bokehjs_action=='built' else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
print()
def show_help(bokehjs_action):
''' Print information about extra Bokeh-specific command line options.
Args:
bokehjs_action (str) : one of 'built', 'installed', or 'packaged'
how (or if) BokehJS was installed into the python source tree
Returns:
None
'''
print()
if bokehjs_action in ['built', 'installed']:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build-js build and install a fresh BokehJS")
print(" --install-js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print("No extra Bokeh-specific options are available.")
print()
# -----------------------------------------------------------------------------
# Other functions used directly by setup.py
# -----------------------------------------------------------------------------
def build_or_install_bokehjs():
''' Build a new BokehJS (and install it) or install a previously built
BokehJS.
If no options ``--build-js`` or ``--install-js`` are detected, the
user is prompted for what to do.
If ``--existing-js`` is detected, then this setup.py is being run from a
packaged sdist, no action is taken.
Note that ``--build-js`` is only compatible with the following ``setup.py``
commands: install, develop, sdist, egg_info, build
Returns:
str : one of 'built', 'installed', 'packaged'
How (or if) BokehJS was installed into the python source tree
'''
# This happens when building from inside a published, pre-packaged sdist
# The --existing-js option is not otherwise documented
if '--existing-js' in sys.argv:
sys.argv.remove('--existing-js')
return "packaged"
if '--build-js' not in sys.argv and '--install-js' not in sys.argv:
jsbuild = jsbuild_prompt()
elif '--build-js' in sys.argv:
jsbuild = True
sys.argv.remove('--build-js')
# must be "--install-js"
else:
jsbuild = False
sys.argv.remove('--install-js')
jsbuild_ok = ('install', 'develop', 'sdist', 'egg_info', 'build')
if jsbuild and not any(arg in sys.argv for arg in jsbuild_ok):
print("Error: Option '--build-js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
if jsbuild:
build_js()
install_js()
return "built"
else:
install_js()
return "installed"
def fixup_building_sdist():
''' Check for 'sdist' and ensure we always build BokehJS when packaging
Source distributions do not ship with BokehJS source code, but must ship
with a pre-built BokehJS library. This function modifies ``sys.argv`` as
necessary so that ``--build-js`` IS present, and ``--install-js`` is NOT.
Returns:
None
'''
if "sdist" in sys.argv:
if "--install-js" in sys.argv:
print("Removing '--install-js' incompatible with 'sdist'")
sys.argv.remove('--install-js')
if "--build-js" not in sys.argv:
print("Adding '--build-js' required for 'sdist'")
sys.argv.append('--build-js')
def fixup_for_packaged():
''' If we are installing FROM an sdist, then a pre-built BokehJS is
already installed in the python source tree.
The command line options ``--build-js`` or ``--install-js`` are
removed from ``sys.argv``, with a warning.
Also adds ``--existing-js`` to ``sys.argv`` to signal that BokehJS is
already packaged.
Returns:
None
'''
if exists(join(ROOT, 'PKG-INFO')):
if "--build-js" in sys.argv or "--install-js" in sys.argv:
print(SDIST_BUILD_WARNING)
if "--build-js" in sys.argv:
sys.argv.remove('--build-js')
if "--install-js" in sys.argv:
sys.argv.remove('--install-js')
if "--existing-js" not in sys.argv:
sys.argv.append('--existing-js')
def fixup_old_jsargs():
''' Fixup (and warn about) old style command line options with underscores.
This function modifies ``sys.argv`` to make the replacements:
* ``--build_js`` to --build-js
* ``--install_js`` to --install-js
and prints a warning about their deprecation.
Returns:
None
'''
for i in range(len(sys.argv)):
if sys.argv[i] == '--build_js':
print("WARNING: --build_js (with underscore) is deprecated, use --build-js")
sys.argv[i] = '--build-js'
if sys.argv[i] == '--install_js':
print("WARNING: --install_js (with underscore) is deprecated, use --install-js")
sys.argv[i] = '--install-js'
# Horrible hack: workaround to allow creation of bdist_wheel on pip
# installation. Why, for God's sake, is pip forcing the generation of wheels
# when installing a package?
def get_cmdclass():
''' A ``cmdclass`` that works around a setuptools deficiency.
There is no need to build wheels when installing a package, however some
versions of setuptools seem to mandate this. This is a hacky workaround
that modifies the ``cmdclass`` returned by versioneer so that not having
wheel installed is not a fatal error.
'''
cmdclass = versioneer.get_cmdclass()
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
# pip does not require bdist_wheel when wheel is not installed
bdist_wheel = None
if bdist_wheel is not None:
cmdclass["bdist_wheel"] = bdist_wheel
return cmdclass
def get_package_data():
''' All of the "extra" package data files collected by the
``package_files`` and ``package_path`` functions in ``setup.py``.
'''
return { 'bokeh': _PACKAGE_DATA }
def get_version():
''' The version of Bokeh currently checked out
Returns:
str : the version string
'''
return versioneer.get_version()
# -----------------------------------------------------------------------------
# Helpers for operation in the bokehjs dir
# -----------------------------------------------------------------------------
def jsbuild_prompt():
''' Prompt users whether to build a new BokehJS or install an existing one.
Returns:
bool : True, if a new build is requested, False otherwise
'''
print(BOKEHJS_BUILD_PROMPT)
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
# -----------------------------------------------------------------------------
# Helpers for operations in the bokehjs dir
# -----------------------------------------------------------------------------
def build_js():
''' Build BokehJS files (CSS, JS, etc) under the ``bokehjs`` source
subdirectory.
Also prints a table of statistics about the generated assets (file sizes,
etc.) or any error messages if the build fails.
Note this function only builds BokehJS assets, it does not install them
into the python source tree.
'''
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
outmsg = proc.stdout.read().decode('ascii', errors='ignore')
outmsg = "\n".join([" " + x for x in outmsg.split("\n")])
errmsg = proc.stderr.read().decode('ascii', errors='ignore')
errmsg = "\n".join([" " + x for x in errmsg.split("\n")])
print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
m = pat.match(line)
if not m: continue # skip generate.py output lines
stamp, txt = m.groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
def size(*path):
return os.stat(join("bokehjs", "build", *path)).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))
print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))
print(" - bokeh-api.js : %6.1f KB" % size("js", "bokeh-api.js"))
print(" - bokeh-api.min.js : %6.1f KB" % size("js", "bokeh-api.min.js"))
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
sys.exit(1)
def install_js():
''' Copy built BokehJS files into the Python source tree.
Returns:
None
'''
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print(BOKEHJS_INSTALL_FAIL)
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
# -----------------------------------------------------------------------------
# Helpers for collecting package data
# -----------------------------------------------------------------------------
_PACKAGE_DATA = []
def package_files(*paths):
'''
'''
_PACKAGE_DATA.extend(paths)
def package_path(path, filters=()):
'''
'''
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
_PACKAGE_DATA.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
_PACKAGE_DATA.append(join(path, f))
# -----------------------------------------------------------------------------
# Status and error message strings
# -----------------------------------------------------------------------------
BOKEHJS_BUILD_PROMPT = """
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
"""
BOKEHJS_INSTALL_FAIL = """
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build-js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
"""
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned the following
---- on stdout:
%s
---- on stderr:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
SDIST_BUILD_WARNING = """
Source distribution (sdist) packages come with PRE-BUILT BokehJS files.
Building/installing from the bokehjs source directory of sdist packages is
disabled, and the options --build-js and --install-js will be IGNORED.
To build or develop BokehJS yourself, you must clone the full Bokeh GitHub
repository from https://github.com/bokeh/bokeh
"""
|
draperjames/bokeh
|
_setup_support.py
|
Python
|
bsd-3-clause
| 15,550
|
[
"GULP"
] |
1597a421404f6386db9b2adfaa985032876294da2c8b3d8cac4ed49fe145fb8f
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""
import pprint
from lxml import etree
import six
from testtools import content
import testtools.matchers
class DictKeysMismatch(object):
def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' %
{'d1only': self.d1only, 'd2only': self.d2only})
def get_details(self):
return {}
class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" %
{'key': self.key, 'd1_value': self.d1_value,
'd2_value': self.d2_value})
def get_details(self):
return {}
class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictMatches(%s)' % (pprint.pformat(self.d1))
# Useful assertions
def match(self, d2):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care about (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = sorted(d1keys - d2keys)
d2only = sorted(d2keys - d1keys)
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
return DictMismatch(key, d1value, d2value)
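# Illustrative usage (not part of the original module): DictMatches is meant to
# be used inside testtools' assertThat; match() returns None when the two dicts
# are equivalent and a mismatch object otherwise.  The dict contents below are
# made up purely for illustration.
def _dict_matches_example():
    matcher = DictMatches({'id': 'DONTCARE', 'flavor': {'ram': 512}})
    assert matcher.match({'id': 'abc123', 'flavor': {'ram': 512}}) is None
    mismatch = matcher.match({'id': 'abc123', 'flavor': {'ram': 256}})
    assert mismatch is not None and 'ram' in mismatch.describe()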
class ListLengthMismatch(object):
def __init__(self, len1, len2):
self.len1 = len1
self.len2 = len2
def describe(self):
return ('Length mismatch: len(L1)=%(len1)d != '
'len(L2)=%(len2)d' % {'len1': self.len1, 'len2': self.len2})
def get_details(self):
return {}
class DictListMatches(object):
def __init__(self, l1, approx_equal=False, tolerance=0.001):
self.l1 = l1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
# Useful assertions
def match(self, l2):
"""Assert a list of dicts are equivalent."""
l1count = len(self.l1)
l2count = len(l2)
if l1count != l2count:
return ListLengthMismatch(l1count, l2count)
for d1, d2 in zip(self.l1, l2):
matcher = DictMatches(d2,
approx_equal=self.approx_equal,
tolerance=self.tolerance)
did_match = matcher.match(d1)
if did_match:
return did_match
class SubDictMismatch(object):
def __init__(self,
key=None,
sub_value=None,
super_value=None,
keys=False):
self.key = key
self.sub_value = sub_value
self.super_value = super_value
self.keys = keys
def describe(self):
if self.keys:
return "Keys between dictionaries did not match"
else:
return("Dictionaries do not match at %s. d1: %s d2: %s"
% (self.key,
self.super_value,
self.sub_value))
def get_details(self):
return {}
class IsSubDictOf(object):
def __init__(self, super_dict):
self.super_dict = super_dict
def __str__(self):
return 'IsSubDictOf(%s)' % (self.super_dict)
def match(self, sub_dict):
"""Assert a sub_dict is subset of super_dict."""
if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
return SubDictMismatch(keys=True)
for k, sub_value in sub_dict.items():
super_value = self.super_dict[k]
if isinstance(sub_value, dict):
matcher = IsSubDictOf(super_value)
did_match = matcher.match(sub_value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
if sub_value != super_value:
return SubDictMismatch(k, sub_value, super_value)
class FunctionCallMatcher(object):
def __init__(self, expected_func_calls):
self.expected_func_calls = expected_func_calls
self.actual_func_calls = []
def call(self, *args, **kwargs):
func_call = {'args': args, 'kwargs': kwargs}
self.actual_func_calls.append(func_call)
def match(self):
dict_list_matcher = DictListMatches(self.expected_func_calls)
return dict_list_matcher.match(self.actual_func_calls)
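# Illustrative usage (not part of the original module): FunctionCallMatcher
# records calls made through .call() and compares them against the expected
# list of {'args': ..., 'kwargs': ...} dicts; match() returns None when they
# agree.  The arguments below are made up purely for illustration.
def _function_call_matcher_example():
    fcm = FunctionCallMatcher([{'args': (1, 2), 'kwargs': {'flag': True}}])
    fcm.call(1, 2, flag=True)
    assert fcm.match() is None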
class XMLMismatch(object):
"""Superclass for XML mismatch."""
def __init__(self, state):
self.path = str(state)
self.expected = state.expected
self.actual = state.actual
def describe(self):
return "%(path)s: XML does not match" % {'path': self.path}
def get_details(self):
return {
'expected': content.text_content(self.expected),
'actual': content.text_content(self.actual),
}
class XMLDocInfoMismatch(XMLMismatch):
"""XML version or encoding doesn't match."""
def __init__(self, state, expected_doc_info, actual_doc_info):
super(XMLDocInfoMismatch, self).__init__(state)
self.expected_doc_info = expected_doc_info
self.actual_doc_info = actual_doc_info
def describe(self):
return ("%(path)s: XML information mismatch(version, encoding) "
"expected version %(expected_version)s, "
"expected encoding %(expected_encoding)s; "
"actual version %(actual_version)s, "
"actual encoding %(actual_encoding)s" %
{'path': self.path,
'expected_version': self.expected_doc_info['version'],
'expected_encoding': self.expected_doc_info['encoding'],
'actual_version': self.actual_doc_info['version'],
'actual_encoding': self.actual_doc_info['encoding']})
class XMLTagMismatch(XMLMismatch):
"""XML tags don't match."""
def __init__(self, state, idx, expected_tag, actual_tag):
super(XMLTagMismatch, self).__init__(state)
self.idx = idx
self.expected_tag = expected_tag
self.actual_tag = actual_tag
def describe(self):
return ("%(path)s: XML tag mismatch at index %(idx)d: "
"expected tag <%(expected_tag)s>; "
"actual tag <%(actual_tag)s>" %
{'path': self.path, 'idx': self.idx,
'expected_tag': self.expected_tag,
'actual_tag': self.actual_tag})
class XMLAttrKeysMismatch(XMLMismatch):
"""XML attribute keys don't match."""
def __init__(self, state, expected_only, actual_only):
super(XMLAttrKeysMismatch, self).__init__(state)
self.expected_only = ', '.join(sorted(expected_only))
self.actual_only = ', '.join(sorted(actual_only))
def describe(self):
return ("%(path)s: XML attributes mismatch: "
"keys only in expected: %(expected_only)s; "
"keys only in actual: %(actual_only)s" %
{'path': self.path, 'expected_only': self.expected_only,
'actual_only': self.actual_only})
class XMLAttrValueMismatch(XMLMismatch):
"""XML attribute values don't match."""
def __init__(self, state, key, expected_value, actual_value):
super(XMLAttrValueMismatch, self).__init__(state)
self.key = key
self.expected_value = expected_value
self.actual_value = actual_value
def describe(self):
return ("%(path)s: XML attribute value mismatch: "
"expected value of attribute %(key)s: %(expected_value)r; "
"actual value: %(actual_value)r" %
{'path': self.path, 'key': self.key,
'expected_value': self.expected_value,
'actual_value': self.actual_value})
class XMLTextValueMismatch(XMLMismatch):
"""XML text values don't match."""
def __init__(self, state, expected_text, actual_text):
super(XMLTextValueMismatch, self).__init__(state)
self.expected_text = expected_text
self.actual_text = actual_text
def describe(self):
return ("%(path)s: XML text value mismatch: "
"expected text value: %(expected_text)r; "
"actual value: %(actual_text)r" %
{'path': self.path, 'expected_text': self.expected_text,
'actual_text': self.actual_text})
class XMLUnexpectedChild(XMLMismatch):
"""Unexpected child present in XML."""
def __init__(self, state, tag, idx):
super(XMLUnexpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML unexpected child element <%(tag)s> "
"present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLExpectedChild(XMLMismatch):
"""Expected child not present in XML."""
def __init__(self, state, tag, idx):
super(XMLExpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML expected child element <%(tag)s> "
"not present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLMatchState(object):
"""Maintain some state for matching.
Tracks the XML node path and saves the expected and actual full
XML text, for use by the XMLMismatch subclasses.
"""
def __init__(self, expected, actual):
self.path = []
self.expected = expected
self.actual = actual
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_tb):
self.path.pop()
return False
def __str__(self):
return '/' + '/'.join(self.path)
def node(self, tag, idx):
"""Adds tag and index to the path; they will be popped off when
the corresponding 'with' statement exits.
:param tag: The element tag
:param idx: If not None, the integer index of the element
within its parent. Not included in the path
element if None.
"""
if idx is not None:
self.path.append("%s[%d]" % (tag, idx))
else:
self.path.append(tag)
return self
class XMLMatches(object):
"""Compare XML strings. More complete than string comparison."""
SKIP_TAGS = (etree.Comment, etree.ProcessingInstruction)
@staticmethod
def _parse(text_or_bytes):
if isinstance(text_or_bytes, six.text_type):
text_or_bytes = text_or_bytes.encode("utf-8")
parser = etree.XMLParser(encoding="UTF-8")
return etree.parse(six.BytesIO(text_or_bytes), parser)
def __init__(self, expected, allow_mixed_nodes=False,
skip_empty_text_nodes=True, skip_values=('DONTCARE',)):
self.expected_xml = expected
self.expected = self._parse(expected)
self.allow_mixed_nodes = allow_mixed_nodes
self.skip_empty_text_nodes = skip_empty_text_nodes
self.skip_values = set(skip_values)
def __str__(self):
return 'XMLMatches(%r)' % self.expected_xml
def match(self, actual_xml):
actual = self._parse(actual_xml)
state = XMLMatchState(self.expected_xml, actual_xml)
expected_doc_info = self._get_xml_docinfo(self.expected)
actual_doc_info = self._get_xml_docinfo(actual)
if expected_doc_info != actual_doc_info:
return XMLDocInfoMismatch(state, expected_doc_info,
actual_doc_info)
result = self._compare_node(self.expected.getroot(),
actual.getroot(), state, None)
if result is False:
return XMLMismatch(state)
elif result is not True:
return result
@staticmethod
def _get_xml_docinfo(xml_document):
return {'version': xml_document.docinfo.xml_version,
'encoding': xml_document.docinfo.encoding}
def _compare_text_nodes(self, expected, actual, state):
expected_text = [expected.text]
expected_text.extend(child.tail for child in expected)
actual_text = [actual.text]
actual_text.extend(child.tail for child in actual)
if self.skip_empty_text_nodes:
expected_text = [text for text in expected_text
if text and not text.isspace()]
actual_text = [text for text in actual_text
if text and not text.isspace()]
if self.skip_values.intersection(
expected_text + actual_text):
return
if self.allow_mixed_nodes:
# let's sort text nodes because they can be mixed
expected_text = sorted(expected_text)
actual_text = sorted(actual_text)
if expected_text != actual_text:
return XMLTextValueMismatch(state, expected_text, actual_text)
def _compare_node(self, expected, actual, state, idx):
"""Recursively compares nodes within the XML tree."""
# Start by comparing the tags
if expected.tag != actual.tag:
return XMLTagMismatch(state, idx, expected.tag, actual.tag)
with state.node(expected.tag, idx):
# Compare the attribute keys
expected_attrs = set(expected.attrib.keys())
actual_attrs = set(actual.attrib.keys())
if expected_attrs != actual_attrs:
expected_only = expected_attrs - actual_attrs
actual_only = actual_attrs - expected_attrs
return XMLAttrKeysMismatch(state, expected_only, actual_only)
# Compare the attribute values
for key in expected_attrs:
expected_value = expected.attrib[key]
actual_value = actual.attrib[key]
if self.skip_values.intersection(
[expected_value, actual_value]):
continue
elif expected_value != actual_value:
return XMLAttrValueMismatch(state, key, expected_value,
actual_value)
# Compare text nodes
text_nodes_mismatch = self._compare_text_nodes(
expected, actual, state)
if text_nodes_mismatch:
return text_nodes_mismatch
# Compare the contents of the node
matched_actual_child_idxs = set()
# first_actual_child_idx - pointer to next actual child
# used with allow_mixed_nodes=False ONLY
# prevents visiting actual child nodes twice
first_actual_child_idx = 0
for expected_child in expected:
if expected_child.tag in self.SKIP_TAGS:
continue
related_actual_child_idx = None
if self.allow_mixed_nodes:
first_actual_child_idx = 0
for actual_child_idx in range(
first_actual_child_idx, len(actual)):
if actual[actual_child_idx].tag in self.SKIP_TAGS:
first_actual_child_idx += 1
continue
if actual_child_idx in matched_actual_child_idxs:
continue
# Compare the nodes
result = self._compare_node(expected_child,
actual[actual_child_idx],
state, actual_child_idx)
first_actual_child_idx += 1
if result is not True:
if self.allow_mixed_nodes:
continue
else:
return result
else: # nodes match
related_actual_child_idx = actual_child_idx
break
if related_actual_child_idx is not None:
matched_actual_child_idxs.add(actual_child_idx)
else:
return XMLExpectedChild(state, expected_child.tag,
actual_child_idx + 1)
# Make sure we consumed all nodes in actual
for actual_child_idx, actual_child in enumerate(actual):
if (actual_child.tag not in self.SKIP_TAGS and
actual_child_idx not in matched_actual_child_idxs):
return XMLUnexpectedChild(state, actual_child.tag,
actual_child_idx)
# The nodes match
return True
class EncodedByUTF8(object):
def match(self, obj):
if isinstance(obj, six.binary_type):
if hasattr(obj, "decode"):
try:
obj.decode("utf-8")
except UnicodeDecodeError:
return testtools.matchers.Mismatch(
"%s is not encoded in UTF-8." % obj)
else:
reason = ("Type of '%(obj)s' is '%(obj_type)s', "
"should be '%(correct_type)s'."
% {
"obj": obj,
"obj_type": type(obj).__name__,
"correct_type": six.binary_type.__name__
})
return testtools.matchers.Mismatch(reason)
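# --- Hedged usage sketch; not part of the original nova module ---
# The matchers above follow the testtools protocol: match() returns None on
# success and a Mismatch object on failure.  The byte strings below are made
# up purely to illustrate EncodedByUTF8 and do not come from any nova test.
def _encoded_by_utf8_demo():
    matcher = EncodedByUTF8()
    assert matcher.match(b'caf\xc3\xa9') is None            # valid UTF-8 bytes
    assert matcher.match(b'\xff\xfe broken') is not None    # undecodable bytes
    assert matcher.match(u'not bytes') is not None          # wrong type entirely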
|
sebrandon1/nova
|
nova/tests/unit/matchers.py
|
Python
|
apache-2.0
| 20,022
|
[
"VisIt"
] |
7f7b68e9db7e9e4baeafff0077f8e4b133842de53f1fadc221d11dd4fa8b315f
|
#!/usr/bin/env python2
import sys
#sys.path.append('/data/antares/aux')
#sys.path.append('/mnt/data/antares/aux/sne.space/')
#sys.path.append('/home/antares/nfs_share/tzaidi/HonorsThesisTZ/ThesisCode/classification/')
sys.path.append('../classification')
import os
import glob
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import rec2csv, rec2txt
from astropy.visualization import hist
from collections import Counter, OrderedDict
from ANTARES_object import LAobject
import scipy.interpolate as scinterp
from mpi4py import MPI
import pickle
import bandMap
# since the claimedtype in the sne.space data is ordered by time (newest claimedtype first)
# it makes sense to store this, and keep a count of how many studies agree with that type
# this effectively decides what the final classification should be
# since, of course, people don't actually agree on type, despite the spectra
class OrderedCounter(Counter, OrderedDict):
"""
trivial implementation of an ordered counter
"""
pass
def check_bad_types(ntype):
if ntype == 'Candidate' or\
ntype.endswith('?') or\
ntype =='I' or\
ntype.startswith('Star') or\
ntype.startswith('CV') or\
ntype.startswith('AGN') or\
ntype.startswith('LBV') or\
ntype == 'Radio':
return True
else:
return False
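# --- Hedged illustration; not part of the original script ---
# A minimal check of how OrderedCounter and check_bad_types are used below:
# keys keep the order in which claimed types were inserted (newest claim first
# in the sne.space JSON) while the values count the supporting sources.  The
# type names here are invented purely for illustration.
def _claimedtype_consensus_demo():
    types = OrderedCounter()
    types['Ia'] = 3      # most recent claimed type, backed by 3 sources
    types['II'] = 1      # older claim, backed by 1 source
    assert list(types.keys())[0] == 'Ia'   # newest claim comes first
    assert types['Ia'] >= 2                # the consensus test used in GProcessing
    assert not check_bad_types('Ia')       # 'Ia' is an acceptable SN type
    assert check_bad_types('Candidate')    # candidates are filtered out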
def GProcessing():
"""
This method does the heavy lifting of actually processing all the sne.space lightcurves
Each lightcurve is read in parallel with MPI, and has to pass various cuts
A dictionary of all the objects is built up, containing auxillary information on the object
as well as the status of processing and the output of the processing
If it fails the cuts, the object is not used, and simply marked as failed
If it passes the cuts, a gaussian process is used to attempt to smooth the light curve in each band
Individual bands are treated separately, and allowed to fail independent of other bands
If all the bands fail, the object is marked as having failed, even if it did pass the cuts
(as no useful data was extracted)
We attempt to align the lightcurves in an absolute sense (i.e. max to fixed phase)
rather than relative to each other (as this processing is done in parallel, and we don't have that info)
^^^ Not the case the lightcurves are not aligned relative to one another
This is deemed unnecessary because the wavelet transforms are approximately translation invariant
A file is written out per object with the gaussian process smoothed data
"""
source = "../data/OSC/raw/"
destination = "../data/OSC/parsed/"
# setup the MPI process, and divide up the files for processing
# this division is just by number of files, not relative amount of data in each file
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
procs_num = comm.Get_size()
print(procs_num, rank)
sne_files = glob.glob(source + 'sne-*/*json')
sne_files = np.random.permutation(sne_files)
nfiles = len(sne_files)
    quotient = nfiles // procs_num + 1
P = rank*quotient
Q = (rank+1)*quotient
if P > nfiles:
P = nfiles
if Q > nfiles:
Q = nfiles
print(procs_num, rank, nfiles, quotient, P, Q)
dict_list = []
# setup quantities to test the presence/absence of
badflags = ("kcorrected", "scorrected", "mcorrected", "upperlimit", "lowerlimit")
goodflags = ('time', 'magnitude', 'e_magnitude', 'band')
# setup the quantities we want to save for each object, as well as default datatypes for each one
obj_data = OrderedDict()
obj_data_keys = OrderedDict([('ra',np.nan),('dec',np.nan),('maxdate',np.nan),('redshift', np.nan),('ebv',np.nan),\
('host','--'),('hostra',np.nan),('hostdec',np.nan),('hostoffsetang',np.nan),('hostoffsetdist',np.nan),\
('maxappmag',np.nan),("maxband",'--')])
kernelpars = []
list_outfiles = []
P = int(P)
Q = int(Q)
lcurve_losses = Counter()
print("There are a total of {} SNe lightcurves".format(len(sne_files)))
for num, lcurve in enumerate(sne_files[P:Q]):
try:
print("Starting file {:<7}\r".format(num),end="")
with open(lcurve, 'r') as f:
data = json.load(f)
objname = list(data.keys())[0]
#print(objname)
keys = list(data[objname].keys())
tempo_dict = {}
tempo_dict['name'] = objname
tempo_dict['status'] = 'good'
# do we have photometry
if not "photometry" in keys:
tempo_dict['status'] = 'bad'
lcurve_losses.update(['Photometry not in keys'])
continue
            # this cut comes from the histogram of the number of observations (made at the end)
            # - the value isn't known a priori; it's just a reasonable cut that doesn't discard
            # too much data while still making sure the objects we keep are well observed
thisnobs = len(data[objname]['photometry'])
if thisnobs < 25:
tempo_dict['status'] = 'bad'
lcurve_losses.update(['Not enough data in SNe stage (all filters)'])
continue
# do we have any claimedtype
if not "claimedtype" in keys:
tempo_dict['status'] = 'bad'
lcurve_losses.update(['No claimed type in SNe stage'])
continue
# if we have claimed types, check them
types = OrderedCounter()
for x in data[objname]['claimedtype']:
types[x['value']] = len(x['source'].split(','))
# the claimedtype list is descending order list with more recent claimed type listed first
# via James Gulliochon (@astrocrash) on twitter
ntype = list(types.keys())[0]
if len(types) == 1:
# if we have only one claimed type, then as long as it's not bad, we're done
if check_bad_types(ntype):
tempo_dict['status'] = 'cand'
lcurve_losses.update(['Bad type with only one type available SNe'])
continue
else:
# if we have multiple claimed types, as long as two or more sources agree
# on the most recent claimedtype, then accept it
if types[ntype] >= 2:
if check_bad_types(ntype):
tempo_dict['status'] = 'cand'
lcurve_losses.update(['Bad types with only two types available SNe'])
continue
else:
# if two sources don't agree on the most recent claimed type
# check if three or more sources agree on the most common claimed type
                    most_claims = np.array(list(types.values())).argmax()  # list() needed under Python 3
nclaims = list(types.values())[most_claims]
#print("THIS NEEDS MORE ANALYSIS!")
#print(nclaims)
if nclaims >= 3:
# we'll accept the most common claimed type as the type then
ntype = list(types)[most_claims]
if check_bad_types(ntype):
tempo_dict['status'] = 'cand'
lcurve_losses.update(['Bad types with 3 or more types available SNe'])
continue
else:
# three sources can't even agree on the most common type, and only one sources claims
# the most recent type
# we treat that as lack of consensus
#print "MaybeWeird ", objname, ntype, types, types[ntype]
tempo_dict['status'] = 'cand'
lcurve_losses.update(["Three source can't agree on type, lack of consensus"])
continue
thisnobs = len(data[objname]['photometry'])
time = []
band = []
mag = []
magerr = []
for obs in data[objname]['photometry']:
keys = obs.keys()
if any(key in obs for key in badflags):
# photometry is not observer frame or is a lower or upperlimit - skip
#lcurve_losses.update(['Bad flags found SNe'])
continue
if not all(key in obs for key in goodflags):
# photometry is not UV/Optical/NIR - all these keys must be present
#lcurve_losses.update(['Not all necessary info found SNe'])
continue
thisfilt = ''
if 'telescope' in obs:
thisfilt = obs['telescope']
if 'system' in obs:
thisfilt = '_'.join((thisfilt, obs['system']))
time.append(float(obs['time']))
band.append('_'.join((obs['band'].replace('\'','prime').replace(' ','_'),thisfilt)))
mag.append(float(obs['magnitude']))
magerr.append(float(obs['e_magnitude']))
if len(time) == 0:
tempo_dict['status'] = 'nogoodobs'
continue
out = np.rec.fromarrays((time, mag, magerr, band), names='time,mag,magerr,pb')
#with open(destination2 +objname+'_lc.dat', 'w') as f:
# f.write(rec2txt(out,precision=8)+'\n')
tempo_dict['status'] = 'good'
#print out
# Do Gaussian Process Fitting right here
try:
#Fix the type for each of the arrays sent to the TouchstoneObject
band = np.array(band)
mag = np.array(mag)
#Fake antares locus ID
locusID = objname
#Fake obsids
obsids = np.array(['a']*len(time))
zp=np.array([27.5]*len(time))
Z = 27.5 # Set the zeropoint for mag-flux conversion
flux = 10**(-0.4 * (mag - Z))
                ## Alternate form in base e --> 10^11 * exp(-0.921034 * mag)  (since 0.4 * 27.5 = 11)
## Propagation of error formula --> abs(flux * -0.921034 * magerr)
fluxerr = np.abs(flux * -0.921034 * magerr)
tobj = LAobject(locusID, objname, time, flux, fluxerr, obsids, band, zp)
#outbspline = tobj.spline_smooth(per = False, minobs = 6)
outgp = tobj.gaussian_process_smooth(per = False, minobs=15)
outjson = {}
#Only loop over filters that both outgp and outbspline share
#print("OutGP: ", list(outgp.keys()))
#print("OutBspline: ", list(outbspline.keys()))
outfilters = list(set(outgp.keys()))
#if set(outgp.keys()) != set(outbspline.keys()):
# print("Different sets of filters!!!")
#Should I do bandMapping right here and head off all future difficulties?
for filt in outfilters:
# Generate resampled values from the Gaussian Process regression
thisgp, thisjd, thismag, thisdmag = outgp[filt]
#I need to choose whether to sample at a frequency or
# a fixed number of points
## FOR NOW, I'M CHOOSING A FIXED NUMBER OF POINTS
#mod_dates = np.arange(thisjd.min(), thisjd.max(), 1.)
### Using 128 points to allow for multi-level wavelet analysis
mod_dates = np.linspace(thisjd.min(), thisjd.max(), 128)
thismod, modcovar = thisgp.predict(thismag, mod_dates)
thismody, modcovary = thisgp.predict(thismag, thisjd)
thiserr = np.sqrt(np.diag(modcovar))
# Generate resampled values from the spline model
#thisbspline = outbspline[filt]
#thismod_bspline = scinterp.splev(mod_dates, thisbspline)
#print(thismod_bspline)
#orig_val_bspline = scinterp.splev(thisjd, thisbspline)
#This is inefficient, but will allow me to subtract the bspline
# before re-running the gaussian process regression
#temp_passband = np.array([filt] * len(thisjd))
#mag_subtracted = thismag - orig_val_bspline
#print("Orig_val_bspline: ", orig_val_bspline)
#print("Mag sub: ", mag_subtracted)
#tobj_subtracted = TouchstoneObject(objname, thisjd, mag_subtracted, thisdmag, temp_passband)
#outgp_subtracted = tobj_subtracted.gaussian_process_alt_smooth(per = False, scalemin=np.log(10**-4), scalemax=np.log(10**5), minobs=10)
#Since I only gave it values for a single filter, the output will only have one filter in the dictionary
#thisgp_subtracted, _, thismag_subtracted, _ = outgp_subtracted[filt]
#thismod_subtracted, modcovar_subtracted = thisgp_subtracted.predict(thismag_subtracted, mod_dates)
#thiserr_subtracted = np.sqrt(np.diag(modcovar_subtracted))
#Re-add back in the b-spline values for the magnitude
#thismod_subtracted = thismod_subtracted + thismod_bspline
goodstatus = True
mad_test = np.median(np.abs(thismody - np.median(thismody)))
mad_mod = np.median(np.abs(thismod - np.median(thismod )))
mad_data = np.median(np.abs(thismag - np.median(thismag )))
if (mad_test - mad_data) > 0.5 or np.abs(mad_mod - mad_data) > 0.5:
goodstatus=False
message = 'Outlier rejection failed (data: %.3f model: %.3f interp: %.3f)'%(mad_data, mad_test, mad_mod)
#print(message)
#lcurve_losses.update(['Outlier rejection failes'])
#continue
#Get rid of the straight line approximations
if np.ptp(thismod) < 2:
continue
#print(thisgp.get_parameter_vector())
outjson[filt] = {'kernel':list(thisgp.get_parameter_vector()),\
'mjd':thisjd.tolist(),\
'mag':thismag.tolist(),\
'dmag':thisdmag.tolist(),\
'modeldate':mod_dates.tolist(),\
'modelmag':thismod.tolist(),\
'modelerr':thiserr.tolist(),\
#'modelmag_sub':thismod_subtracted.tolist(),\
#'bsplinemag':thismod_bspline.tolist(),\
'goodstatus':goodstatus,\
'type': ntype}
kernelpars.append(thisgp.get_parameter_vector()[0])
outjson_mapped = bandMap.remapBands(outjson)
#print(outjson_mapped.keys())
#print(outjson.keys())
if len(outjson_mapped.keys()) > 0:
list_outfiles.append(objname + '_gpsmoothed.json')
with open(destination + objname+'_gpsmoothed.json', mode='w') as f:
json.dump(outjson_mapped, f, indent=2, sort_keys=True)
else:
lcurve_losses.update(['No keys in output json'])
except np.linalg.linalg.LinAlgError as e:
print(e)
print("Failed to complete Gaussian Processing")
lcurve_losses.update(['Linear algebra error in GP processing'])
continue
except:
continue
#close JSON
#endfor over files
with open(destination + 'LCURVES.LIST', mode='w') as outfile:
outfile.write("\n".join(map(str, list_outfiles)))
loss_file = 'sne_losses.json'
with open(loss_file, mode='w') as lfile:
json.dump(lcurve_losses, lfile)
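# --- Hedged worked example; not part of the original pipeline ---
# The magnitude-to-flux conversion inside GProcessing uses
#     flux = 10 ** (-0.4 * (mag - Z))        with zeropoint Z = 27.5
# and propagates the uncertainty with |d(flux)/d(mag)| = ln(10) * 0.4 * flux,
# i.e. fluxerr = |flux * -0.921034 * magerr|.  The numbers below are invented
# to sanity-check the formulas and are not taken from any real light curve.
def _flux_conversion_demo():
    Z = 27.5
    mag = np.array([20.0, 22.5])
    magerr = np.array([0.05, 0.10])
    flux = 10 ** (-0.4 * (mag - Z))
    fluxerr = np.abs(flux * -0.921034 * magerr)
    assert np.all(np.diff(flux) < 0)                       # fainter mag -> less flux
    assert np.allclose(fluxerr, flux * 0.921034 * magerr)  # error propagation
    return flux, fluxerr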
def main():
GProcessing()
if __name__=='__main__':
sys.exit(main())
|
tayebzaidi/HonorsThesisTZ
|
ThesisCode/gen_lightcurves/parse_sne_spaceMod.py
|
Python
|
gpl-3.0
| 16,874
|
[
"Gaussian"
] |
138d331ad9f84df84e1c33ef8bc470d856dec2e1a1d03b8987a12a807c363170
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
import mock
from decimal import Decimal
from kiwi.currency import currency
from stoqlib.database.runtime import get_current_user
from stoqlib.domain.event import Event
from stoqlib.domain.sale import Sale
from stoqlib.gui.editors.saleeditor import (SaleQuoteItemEditor, SaleClientEditor,
SalesPersonEditor, SaleTokenEditor)
from stoqlib.gui.test.uitestutils import GUITest
class TestSaleQuoteItemEditor(GUITest):
def test_show(self):
sale = self.create_sale()
storable = self.create_storable(branch=sale.branch, stock=20)
sale_item = sale.add_sellable(storable.product.sellable)
sale_item.price = 100
editor = SaleQuoteItemEditor(self.store, sale_item)
editor.item_slave.sale.set_label('12345')
self.check_editor(editor, 'editor-salequoteitem-show')
module = 'stoqlib.lib.pluginmanager.PluginManager.is_active'
with mock.patch(module) as patch:
patch.return_value = True
editor = SaleQuoteItemEditor(self.store, sale_item)
editor.item_slave.sale.set_label('23456')
self.check_editor(editor, 'editor-salequoteitem-show-nfe')
class TestSaleQuoteItemSlave(GUITest):
def test_show_param_allow_higher_sale_price(self):
sale = self.create_sale()
storable = self.create_storable(branch=sale.branch, stock=20)
sale_item = sale.add_sellable(storable.product.sellable)
sale_item.price = 100
editor = SaleQuoteItemEditor(self.store, sale_item)
slave = editor.item_slave
slave.sale.set_label('12345')
# quantity=1, price=100
with self.sysparam(ALLOW_HIGHER_SALE_PRICE=True):
self.assertEqual(slave.total.read(), 100)
slave.quantity.update(2)
self.assertEqual(slave.total.read(), 200)
slave.price.update(150)
self.assertEqual(slave.total.read(), 300)
slave.reserved.update(1)
self.click(editor.main_dialog.ok_button)
self.check_editor(editor, 'slave-salequoteitem-with-higher-price-show')
def test_edit_product_without_storable(self):
sale_item = self.create_sale_item()
sale_item.price = 100
self.assertEqual(sale_item.quantity, 1)
editor = SaleQuoteItemEditor(self.store, sale_item)
slave = editor.item_slave
slave.sale.set_label('12345')
self.assertNotVisible(slave, ['reserved'])
self.assertEqual(slave.total.read(), 100)
slave.quantity.update(3)
self.assertEqual(slave.total.read(), 300)
self.click(editor.main_dialog.ok_button)
self.assertEqual(sale_item.quantity, 3)
def test_edit_product_with_batch(self):
sale = self.create_sale()
product = self.create_product()
self.create_storable(product=product, is_batch=True)
sale.status = Sale.STATUS_QUOTE
sale_item = sale.add_sellable(product.sellable)
sale_item.price = 10
self.assertEqual(sale_item.quantity, 1)
editor = SaleQuoteItemEditor(self.store, sale_item)
slave = editor.item_slave
self.assertNotVisible(slave, ['reserved'])
slave.quantity.update(2)
self.assertEqual(slave.total.read(), 20)
self.click(editor.main_dialog.ok_button)
self.assertEqual(sale_item.quantity, 2)
def test_show_param_no_allow_higher_sale_price(self):
sale_item = self.create_sale_item()
editor = SaleQuoteItemEditor(self.store, sale_item)
slave = editor.item_slave
slave.sale.set_label('12345')
# quantity=1, price=100
with self.sysparam(ALLOW_HIGHER_SALE_PRICE=False):
self.assertEqual(slave.total.read(), 100)
slave.quantity.update(2)
self.assertEqual(slave.total.read(), 200)
slave.price.update(150)
# The price greater than 100 should be invalid.
self.assertInvalid(slave, ['price'])
def test_on_confirm_with_discount(self):
events_before = self.store.find(Event).count()
sale_item = self.create_sale_item()
sale_item.sale.identifier = 333123
current_user = get_current_user(self.store)
current_user.profile.max_discount = Decimal('5')
# A manager to authorize the discount
manager = self.create_user()
manager.profile.max_discount = Decimal('10')
editor = SaleQuoteItemEditor(self.store, sale_item)
slave = editor.item_slave
# Try applying 9% of discount
slave.price.update(currency('9.10'))
        # The user is not allowed to give a 9% discount (their limit is 5%)
self.assertNotSensitive(editor.main_dialog, ['ok_button'])
# Lets call the manager and ask for permission
with mock.patch('stoqlib.gui.editors.saleeditor.run_dialog') as rd:
rd.return_value = manager
slave.price.emit('icon-press', gtk.ENTRY_ICON_PRIMARY, None)
# Now it should be possible to confirm
self.click(editor.main_dialog.ok_button)
events_after = self.store.find(Event).count()
self.assertEquals(events_after, events_before + 1)
last_event = self.store.find(Event).order_by(Event.id).last()
expected = (u'Sale 333123: User username authorized 9.00 % '
u'of discount changing\n Description value from $10.00 to $9.10.')
self.assertEquals(last_event.description, expected)
def test_on_confirm_without_discount(self):
events_before = self.store.find(Event).count()
sale_item = self.create_sale_item()
current_user = get_current_user(self.store)
current_user.profile.max_discount = Decimal('5')
# A manager to authorize the discount
manager = self.create_user()
manager.profile.max_discount = Decimal('10')
editor = SaleQuoteItemEditor(self.store, sale_item)
slave = editor.item_slave
# Try applying 10% of discount
slave.price.update(currency('9.00'))
# The user is not allowed to give 10% discount
self.assertNotSensitive(editor.main_dialog, ['ok_button'])
# Lets call the manager and ask for permission
with mock.patch('stoqlib.gui.editors.saleeditor.run_dialog') as rd:
rd.return_value = manager
slave.price.emit('icon-press', gtk.ENTRY_ICON_PRIMARY, None)
# Forget about the discount
slave.price.update(currency('10'))
# This will not trigger an event
self.click(editor.main_dialog.ok_button)
events_after = self.store.find(Event).count()
        # The number of events hasn't changed
self.assertEquals(events_after, events_before)
class TestSaleClientEditor(GUITest):
def test_change_client(self):
zoidberg = self.create_client(u"Zoidberg")
bender = self.create_client(u"Bender")
sale = self.create_sale(client=zoidberg)
sale.identifier = 12345
sale.status = sale.STATUS_CONFIRMED
editor = SaleClientEditor(self.store, model=sale)
        self.assertIn(editor.status.get_text(), (u"Confirmed", u"Ordered"))
self.assertFalse(editor.salesperson_combo.get_sensitive())
self.assertEquals(zoidberg, editor.model.client)
editor.client.select_item_by_data(bender.id)
self.click(editor.main_dialog.ok_button)
self.assertEquals(bender, sale.client)
self.check_editor(editor, 'editor-sale-client-edit')
class TestSalesPersonEditor(GUITest):
def test_change_salesperson(self):
salesperson1 = self.create_sales_person()
salesperson2 = self.create_sales_person()
sale = self.create_sale()
sale.identifier = 1337
sale.status = sale.STATUS_CONFIRMED
sale.salesperson = salesperson1
editor = SalesPersonEditor(self.store, model=sale)
self.check_editor(editor, 'editor-salesperson-edit')
self.assertEquals(editor.salesperson_combo.get_selected(), salesperson1)
self.assertFalse(editor.client_box.get_property('visible'))
self.assertFalse(editor.client_lbl.get_property('visible'))
editor.salesperson_combo.select_item_by_data(salesperson2)
self.click(editor.main_dialog.ok_button)
self.assertEquals(sale.salesperson, salesperson2)
class TestSaleTokenEditor(GUITest):
def test_create(self):
editor = SaleTokenEditor(self.store)
self.check_editor(editor, 'editor-saletoken-create')
def test_edit(self):
token = self.create_sale_token(code=u'sale token 1')
editor = SaleTokenEditor(self.store, model=token)
self.check_editor(editor, 'editor-saletoken-edit')
    def test_description_validation(self):
self.create_sale_token(code=u'sale token 1')
editor = SaleTokenEditor(self.store)
editor.code.set_text(u'sale token 1')
# We should not be able to register this token, description should be
# unique
self.assertNotSensitive(editor.main_dialog, ['ok_button'])
editor.code.set_text(u'sale token 2')
self.assertSensitive(editor.main_dialog, ['ok_button'])
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_salequoteitemeditor.py
|
Python
|
gpl-2.0
| 10,137
|
[
"VisIt"
] |
5db4a7a5dca8bae25a71c78d20e953c6fbefe4dd2e0c3e2e90943472218b9d02
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
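# --- Hedged sketch; not part of scikit-learn ---
# scale() standardizes along an axis: with the defaults each column of the
# result has mean ~0 and unit standard deviation.  The random data below is
# arbitrary and only illustrates the behaviour documented above.
def _scale_demo():
    rng = np.random.RandomState(42)
    X = rng.rand(50, 4) * 100 + 10
    Xs = scale(X)
    assert np.allclose(Xs.mean(axis=0), 0, atol=1e-10)
    assert np.allclose(Xs.std(axis=0), 1)
    return Xs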
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
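# --- Hedged numeric sketch; not part of scikit-learn ---
# Illustration of the MinMaxScaler formula documented above:
#     X_std    = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
#     X_scaled = X_std * (max - min) + min
# The array is invented; it only checks that fit_transform matches the
# documented closed form for the default feature_range (0, 1).
def _minmax_scaler_demo():
    X = np.array([[1., 10.], [2., 20.], [4., 40.]])
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(X)
    X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    assert np.allclose(X_scaled, X_std)               # range (0, 1) is the identity map
    assert np.allclose(scaler.data_range_, [3., 30.])
    return X_scaled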
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
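# --- Hedged sketch; not part of scikit-learn ---
# partial_fit above accumulates mean_ and var_ with the incremental update of
# Chan, Golub & LeVeque, so feeding the data in batches should agree with a
# single call to fit().  The random data and batch split are arbitrary and
# only illustrate that property.
def _standard_scaler_partial_fit_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    full = StandardScaler().fit(X)
    batched = StandardScaler()
    batched.partial_fit(X[:40])
    batched.partial_fit(X[40:])
    assert np.allclose(full.mean_, batched.mean_)
    assert np.allclose(full.var_, batched.var_)
    return batched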
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
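# --- Hedged numeric sketch; not part of scikit-learn ---
# RobustScaler centers on the median and scales by the interquartile range
# (75th - 25th percentile), so a single extreme outlier barely moves center_
# or scale_.  The tiny one-feature array below is invented for illustration.
def _robust_scaler_demo():
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])    # 1000. is an outlier
    scaler = RobustScaler().fit(X)
    q25, q75 = np.percentile(X, (25, 75), axis=0)
    assert np.allclose(scaler.center_, np.median(X, axis=0))   # median == 3
    assert np.allclose(scaler.scale_, q75 - q25)                # IQR == 2
    return scaler.transform(X)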
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
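# Illustrative sketch (not part of the original module): inspecting the
# exponent matrix exposed by ``powers_`` after fitting; the values follow
# directly from the ``_combinations`` helper above.
# >>> import numpy as np
# >>> poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
# >>> poly.powers_
# array([[0, 0],
#        [1, 0],
#        [0, 1],
#        [2, 0],
#        [1, 1],
#        [0, 2]])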
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
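# Minimal usage sketch (not part of the original module): row-wise L2 scaling
# of a small dense array with the function defined above.
# >>> import numpy as np
# >>> X = np.array([[3., 4.], [1., 0.]])
# >>> normalize(X, norm='l2')
# array([[ 0.6,  0.8],
#        [ 1. ,  0. ]])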
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
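# Minimal usage sketch (not part of the original module): because Normalizer
# is stateless, it can sit at the front of a Pipeline; the estimator chosen
# below is only an example.
# >>> from sklearn.pipeline import make_pipeline
# >>> from sklearn.linear_model import LogisticRegression
# >>> pipe = make_pipeline(Normalizer(norm='l2'), LogisticRegression())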
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
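# Minimal usage sketch (not part of the original module): values at or below
# the threshold map to 0, values above it to 1.
# >>> binarize([[0.5, -1.2], [2.0, 0.0]], threshold=0.5)
# array([[ 0.,  0.],
#        [ 1.,  0.]])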
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
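# Illustrative sketch (not part of the original module): for a linear kernel,
# centering K is equivalent to computing the kernel of the column-centered
# data matrix.
# >>> import numpy as np
# >>> X = np.array([[1., 2.], [2., 0.], [0., 1.]])
# >>> K = X.dot(X.T)
# >>> Kc = KernelCenterer().fit(K).transform(K)
# >>> Xc = X - X.mean(axis=0)
# >>> np.allclose(Kc, Xc.dot(Xc.T))
# True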
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e lesser than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
|
ssaeger/scikit-learn
|
sklearn/preprocessing/data.py
|
Python
|
bsd-3-clause
| 67,255
|
[
"Gaussian"
] |
b0792d34ed3dc62f992c418a6b8fed88d7883ff7fb36a6a030918e3d2e59a021
|
########################################################################
# $HeadURL$
########################################################################
""" ProfileManager manages web user profiles
in the DISET framework
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import types
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.DB.UserProfileDB import UserProfileDB
from DIRAC.Core.Security import Properties
gUPDB = False
def initializeUserProfileManagerHandler( serviceInfo ):
global gUPDB
try:
gUPDB = UserProfileDB()
except Exception as e:
return S_ERROR( "Can't initialize UserProfileDB: %s" % str( e ) )
return S_OK()
class UserProfileManagerHandler( RequestHandler ):
types_retrieveProfileVar = [ types.StringTypes, types.StringTypes ]
def export_retrieveProfileVar( self, profileName, varName ):
""" Get profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveVar( userName, userGroup,
userName, userGroup,
profileName, varName )
types_retrieveProfileVarFromUser = [ types.StringTypes, types.StringTypes, types.StringTypes, types.StringTypes ]
def export_retrieveProfileVarFromUser( self, ownerName, ownerGroup, profileName, varName ):
""" Get profile data for web for any user according to perms
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveVar( userName, userGroup,
ownerName, ownerGroup,
profileName, varName )
types_retrieveProfileAllVars = [ types.StringTypes ]
def export_retrieveProfileAllVars( self, profileName ):
""" Get profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveAllUserVars( userName, userGroup, profileName )
types_storeProfileVar = [ types.StringTypes, types.StringTypes, types.StringTypes, types.DictType ]
def export_storeProfileVar( self, profileName, varName, data, perms ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.storeVar( userName, userGroup, profileName, varName, data, perms )
types_deleteProfileVar = [ types.StringTypes, types.StringTypes ]
def export_deleteProfileVar( self, profileName, varName ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.deleteVar( userName, userGroup, profileName, varName )
types_listAvailableProfileVars = [ types.StringTypes ]
def export_listAvailableProfileVars( self, profileName, filterDict = {} ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.listVars( userName, userGroup, profileName, filterDict )
types_getUserProfiles = []
def export_getUserProfiles( self ):
""" Get all profiles for a user
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveUserProfiles( userName, userGroup )
types_setProfileVarPermissions = [ types.StringTypes, types.StringTypes, types.DictType ]
def export_setProfileVarPermissions( self, profileName, varName, perms ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.setUserVarPerms( userName, userGroup, profileName, varName, perms )
types_getProfileVarPermissions = [ types.StringTypes, types.StringTypes ]
def export_getProfileVarPermissions( self, profileName, varName ):
""" Set profile data for web
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveVarPerms( userName, userGroup,
userName, userGroup,
profileName, varName )
types_storeHashTag = [ types.StringTypes ]
def export_storeHashTag( self, tagName ):
""" Set hash tag
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.storeHashTag( userName, userGroup, tagName )
types_retrieveHashTag = [ types.StringTypes ]
def export_retrieveHashTag( self, hashTag ):
""" Get hash tag
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveHashTag( userName, userGroup, hashTag )
types_retrieveAllHashTags = []
def export_retrieveAllHashTags( self ):
""" Get all hash tags
"""
credDict = self.getRemoteCredentials()
userName = credDict[ 'username' ]
userGroup = credDict[ 'group' ]
return gUPDB.retrieveAllHashTags( userName, userGroup )
types_deleteProfiles = [ types.ListType ]
def export_deleteProfiles( self, userList ):
"""
Delete profiles for a list of users
"""
credDict = self.getRemoteCredentials()
requesterUserName = credDict[ 'username' ]
if Properties.SERVICE_ADMINISTRATOR in credDict[ 'properties' ]:
admin = True
else:
admin = False
for entry in userList:
userName = entry
if admin or userName == requesterUserName:
result = gUPDB.deleteUserProfile( userName )
if not result[ 'OK' ]:
return result
return S_OK()
types_getUserProfileNames = [types.DictType]
def export_getUserProfileNames( self, permission ):
"""
    Return the available profile names, filtered according to the given permission dictionary (ReadAccess and PublishAccess)
"""
return gUPDB.getUserProfileNames( permission )
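# Illustrative client-side sketch (not part of this handler): the exports
# above are reached through the DISET RPC layer.  The service path, profile
# name and variable name below are hypothetical.
# >>> from DIRAC.Core.DISET.RPCClient import RPCClient
# >>> upClient = RPCClient( "Framework/UserProfileManager" )
# >>> result = upClient.retrieveProfileVar( "Web/Layout", "desktop" )
# >>> if result[ 'OK' ]:
# ...   data = result[ 'Value' ]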
|
petricm/DIRAC
|
FrameworkSystem/Service/UserProfileManagerHandler.py
|
Python
|
gpl-3.0
| 6,219
|
[
"DIRAC"
] |
ff091651c93c6feebb8fef90b4c36b989112b72b9049e7a272fdc194d293b911
|
"""
Align direct images & make mosaics
"""
import os
import inspect
from collections import OrderedDict
import glob
import traceback
import numpy as np
import matplotlib.pyplot as plt
# conda install shapely
# from shapely.geometry.polygon import Polygon
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import astropy.units as u
import astropy.coordinates as coord
from astropy.table import Table
from . import utils
from . import model
from . import GRIZLI_PATH
# Catalog table tools now put elsewhere
from .catalog import *
def check_status():
"""Make sure all files and modules are in place and print some information if they're not
"""
for ref_dir in ['iref']:
if not os.getenv(ref_dir):
print("""
No ${0} set! Make a directory and point to it in ~/.bashrc or ~/.cshrc.
For example,
$ mkdir $GRIZLI/{0}
$ export {0}="$GRIZLI/{0}/" # put this in ~/.bashrc
""".format(ref_dir))
else:
# WFC3
if not os.getenv('iref').endswith('/'):
print("Warning: $iref should end with a '/' character [{0}]".format(os.getenv('iref')))
test_file = 'iref$uc72113oi_pfl.fits'.replace('iref$', os.getenv('iref'))
if not os.path.exists(test_file):
print("""
HST calibrations not found in $iref [{0}]
To fetch them, run
>>> import grizli.utils
>>> grizli.utils.fetch_default_calibs()
""".format(os.getenv('iref')))
# check_status()
def fresh_flt_file(file, preserve_dq=False, path='../RAW/', verbose=True, extra_badpix=True, apply_grism_skysub=True, crclean=False, mask_regions=True):
"""Copy "fresh" unmodified version of a data file from some central location
Parameters
----------
file : str
Filename
preserve_dq : bool
Preserve DQ arrays of files if they exist in './'
path : str
Path where to find the "fresh" files
verbose : bool
Print information about what's being done
extra_badpix : bool
Apply extra bad pixel mask. Currently this is hard-coded to look for
a file ``badpix_spars200_Nov9.fits`` in the directory specified by
the ``$iref`` environment variable. The file can be downloaded from
https://github.com/gbrammer/wfc3/tree/master/data
apply_grism_skysub : bool
        Currently not used (placeholder).
crclean : bool
Run LACosmicx on the exposure
mask_regions : bool
Apply exposure region mask (like ``_flt.01.mask.reg``) if it exists.
Returns
-------
Nothing, but copies the file from ``path`` to ``./``.
"""
import shutil
    try:
        import lacosmicx
        has_lacosmicx = True
    except:
        has_lacosmicx = False
        if crclean:
            print('Warning (fresh_flt_file): couldn\'t import lacosmicx')
local_file = os.path.basename(file)
    if preserve_dq:
        if os.path.exists(local_file):
            im = pyfits.open(local_file)
            orig_dq = im['DQ'].data
        else:
            orig_dq = None
    else:
        orig_dq = None
    if file == local_file:
        orig_file = pyfits.open(glob.glob(os.path.join(path, file)+'*')[0])
    else:
        orig_file = pyfits.open(file)
    if orig_dq is not None:
        orig_file['DQ'].data = orig_dq
head = orig_file[0].header
# Divide grism images by imaging flats
# G102 -> F105W, uc72113oi_pfl.fits
# G141 -> F140W, uc72113oi_pfl.fits
flat, extra_msg = 1., ''
filter = utils.get_hst_filter(head)
# Copy calibs for ACS/UVIS files
if '_flc' in file:
ftpdir = 'https://hst-crds.stsci.edu/unchecked_get/references/hst/'
calib_types = ['IDCTAB', 'NPOLFILE', 'D2IMFILE']
if filter == 'G800L':
calib_types.append('PFLTFILE')
utils.fetch_hst_calibs(orig_file.filename(), ftpdir=ftpdir,
calib_types=calib_types,
verbose=False)
if filter in ['G102', 'G141']:
flat_files = {'G102': 'uc72113oi_pfl.fits',
'G141': 'uc721143i_pfl.fits'}
flat_file = flat_files[filter]
extra_msg = ' / flat: {0}'.format(flat_file)
flat_im = pyfits.open(os.path.join(os.getenv('iref'), flat_file))
flat = flat_im['SCI'].data[5:-5, 5:-5]
flat_dq = (flat < 0.2)
# Grism FLT from IR amplifier gain
pfl_file = orig_file[0].header['PFLTFILE'].replace('iref$',
os.getenv('iref'))
grism_pfl = pyfits.open(pfl_file)[1].data[5:-5, 5:-5]
orig_file['DQ'].data |= 4*flat_dq
orig_file['SCI'].data *= grism_pfl/flat
# if apply_grism_skysub:
# if 'GSKY001' in orig_file:
if filter == 'G280':
# Use F200LP flat
flat_files = {'G280': 'zcv2053ei_pfl.fits'} # F200LP
flat_file = flat_files[filter]
extra_msg = ' / flat: {0}'.format(flat_file)
flat_im = pyfits.open(os.path.join(os.getenv('jref'), flat_file))
for ext in [1, 2]:
flat = flat_im['SCI', ext].data
flat_dq = (flat < 0.2)
orig_file['DQ', ext].data |= 4*flat_dq
orig_file['SCI', ext].data *= 1./flat
if filter == 'G800L':
flat_files = {'G800L': 'n6u12592j_pfl.fits'} # F814W
flat_file = flat_files[filter]
extra_msg = ' / flat: {0}'.format(flat_file)
flat_im = pyfits.open(os.path.join(os.getenv('jref'), flat_file))
pfl_file = orig_file[0].header['PFLTFILE'].replace('jref$',
os.getenv('jref'))
pfl_im = pyfits.open(pfl_file)
for ext in [1, 2]:
flat = flat_im['SCI', ext].data
flat_dq = (flat < 0.2)
grism_pfl = pfl_im['SCI', ext].data
orig_file['DQ', ext].data |= 4*flat_dq
orig_file['SCI', ext].data *= grism_pfl/flat
if orig_file[0].header['NPOLFILE'] == 'N/A':
# Use an F814W file, but this should be updated
orig_file[0].header['NPOLFILE'] = 'jref$v971826jj_npl.fits'
if head['INSTRUME'] == 'WFPC2':
head['DETECTOR'] = 'WFPC2'
if ((head['INSTRUME'] == 'WFC3') & (head['DETECTOR'] == 'IR')
& extra_badpix):
bp = pyfits.open(os.path.join(os.getenv('iref'),
'badpix_spars200_Nov9.fits'))
if orig_file['DQ'].data.shape == bp[0].data.shape:
orig_file['DQ'].data |= bp[0].data
extra_msg += ' / bpix: $iref/badpix_spars200_Nov9.fits'
# New flags for bad pix in old dark reference files
old_darks = ['x5g1509ki_drk.fits']
old_darks += ['xag1929{x}i_drk.fits'.format(x=x) for x in '345689a']
# For more recent SPARS5
old_darks += ['zb21929si_drk.fits']
#need_badpix = head['DARKFILE'].strip('iref$') in old_darks
need_badpix = True # always add the additional bad pix files
if need_badpix:
new_bp = pyfits.open(os.path.join(os.path.dirname(__file__),
'data',
'wfc3ir_dark_badpix_2019.01.12.fits.gz'))
if orig_file['DQ'].data.shape == new_bp[0].data.shape:
orig_file['DQ'].data |= new_bp[0].data
extra_msg += ' / wfc3ir_dark_badpix_2019.01.12.fits'
if crclean & has_lacosmicx:
for ext in [1, 2]:
print('Clean CRs with LACosmic, extension {0:d}'.format(ext))
sci = orig_file['SCI', ext].data
dq = orig_file['DQ', ext].data
crmask, clean = lacosmicx.lacosmicx(sci, inmask=None,
sigclip=4.5, sigfrac=0.3, objlim=5.0, gain=1.0,
readnoise=6.5, satlevel=65536.0, pssl=0.0, niter=4,
sepmed=True, cleantype='meanmask', fsmode='median',
psfmodel='gauss', psffwhm=2.5, psfsize=7, psfk=None,
psfbeta=4.765, verbose=False)
dq[crmask] |= 1024
#sci[crmask] = 0
logstr = '# {0} -> {1} {2}'
logstr = logstr.format(orig_file.filename(), local_file, extra_msg)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
# WFPC2
if '_c0' in file:
# point to FITS reference files
for key in ['MASKFILE', 'ATODFILE', 'BLEVFILE', 'BLEVDFIL', 'BIASFILE', 'BIASDFIL', 'DARKFILE', 'DARKDFIL', 'FLATFILE', 'FLATDFIL', 'SHADFILE']:
ref_file = '_'.join(head[key].split('.'))+'.fits'
orig_file[0].header[key] = ref_file.replace('h.fits', 'f.fits')
waiv = orig_file[0].header['FLATFILE']
orig_file[0].header['FLATFILE'] = waiv.replace('.fits', '_c0h.fits')
if not os.path.exists(''):
pass
#
# ## testing
# orig_file[0].header['FLATFILE'] = 'm341820ju_pfl.fits'
# Make sure has correct header keys
for ext in range(4):
if 'BUNIT' not in orig_file[ext+1].header:
orig_file[ext+1].header['BUNIT'] = 'COUNTS'
# Copy WFPC2 DQ file (c1m)
dqfile = os.path.join(path, file.replace('_c0', '_c1'))
print('Copy WFPC2 DQ file: {0}'.format(dqfile))
if os.path.exists(os.path.basename(dqfile)):
os.remove(os.path.basename(dqfile))
shutil.copy(dqfile, './')
# Add additional masking since AstroDrizzle having trouble with flats
flat_file = orig_file[0].header['FLATFILE'].replace('uref$', os.getenv('uref')+'/')
pfl = pyfits.open(flat_file)
c1m = pyfits.open(os.path.basename(dqfile), mode='update')
for ext in [1, 2, 3, 4]:
mask = pfl[ext].data > 1.3
c1m[ext].data[mask] |= 2
c1m.flush()
orig_file.writeto(local_file, overwrite=True)
if mask_regions:
apply_region_mask(local_file, dq_value=1024)
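# Illustrative usage sketch (not part of the original module): refresh a
# single WFC3/IR exposure from a RAW directory before preprocessing.  The
# filename below is hypothetical.
# >>> fresh_flt_file('ibhj34h8q_flt.fits', path='../RAW/', preserve_dq=False,
# ...                crclean=False, mask_regions=True)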
def apply_persistence_mask(flt_file, path='../Persistence', dq_value=1024,
err_threshold=0.6, sci_threshold=0.1,
grow_mask=3, subtract=True,
verbose=True, reset=False):
"""Make a mask for pixels flagged as being affected by persistence
Persistence products can be downloaded from https://archive.stsci.edu/prepds/persist/search.php, specifically the
"_persist.fits" files.
Parameters
----------
flt_file : str
Filename of the WFC3/IR FLT exposure
path : str
Path to look for the "persist.fits" file.
dq_value : int
DQ bit to flip for flagged pixels
err_threshold : float
ERR array threshold for defining affected pixels:
>>> flagged = persist > err_threshold*ERR
grow_mask : int
Factor by which to dilate the persistence mask.
subtract : bool
Subtract the persistence model itself from the SCI extension.
reset : bool
Unset `dq_value` bit.
verbose : bool
Print information to the terminal
Returns
-------
Nothing, updates the DQ extension of `flt_file`.
"""
import scipy.ndimage as nd
flt = pyfits.open(flt_file, mode='update')
pers_file = os.path.join(path,
os.path.basename(flt_file).replace('_flt.fits', '_persist.fits'))
    if not os.path.exists(pers_file):
        logstr = '# Persistence file {0} not found'.format(pers_file)
        utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
        return 0
    pers = pyfits.open(pers_file)
if pers['SCI'].data.min() < -40:
subtract = False
pers_data = pers['SCI'].data*1
pers_data = np.maximum(pers_data, 0)
pers_mask = pers['SCI'].data > err_threshold*flt['ERR'].data
#pers_mask &= pers['SCI'].data > sci_threshold*flt['SCI'].data
if grow_mask > 0:
pers_mask = nd.maximum_filter(pers_mask*1, size=grow_mask)
else:
pers_mask = pers_mask * 1
NPERS = pers_mask.sum()
logstr = '# {0}: flagged {1:d} pixels affected by persistence (pers/err={2:.2f})'.format(pers_file, NPERS, err_threshold)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
flt[0].header['PERSNPIX'] = (NPERS, 'Number of persistence-flagged pixels')
    flt[0].header['PERSLEVL'] = (err_threshold, 'Persistence threshold err_threshold')
    flt[0].header['PERSGROW'] = (grow_mask, 'Persistence mask dilation grow_mask')
if reset:
flt['DQ'].data -= (flt['DQ'].data & dq_value)
if NPERS > 0:
flt['DQ'].data[pers_mask > 0] |= dq_value
if subtract:
dont_subtract = False
if 'SUBPERS' in flt[0].header:
if flt[0].header['SUBPERS']:
dont_subtract = True
if not dont_subtract:
flt['SCI'].data -= pers_data
flt['ERR'].data = np.sqrt(flt['ERR'].data**2+pers_data**2)
flt[0].header['SUBPERS'] = (True, 'Persistence model subtracted')
flt.flush()
flt.close()
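# Illustrative usage sketch (not part of the original module): flag and
# subtract persistence for one exposure, assuming the MAST "_persist.fits"
# product has been downloaded to ../Persistence.  The filename is hypothetical.
# >>> apply_persistence_mask('ibhj34h8q_flt.fits', path='../Persistence',
# ...                        dq_value=1024, err_threshold=0.6, subtract=True)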
def apply_region_mask(flt_file, dq_value=1024, verbose=True):
"""Apply DQ mask from a DS9 region file
Parameters
----------
flt_file : str
Filename of a FLT exposure. The function searches for region files
with filenames like
>>> mask_file = flt_file.replace('_flt.fits','.{ext}.mask.reg')
where ``{ext}`` is an integer referring to the SCI extension in the
FLT file (1 for WFC3/IR, 1 or 2 for ACS/WFC and WFC3/UVIS).
dq_value : int
DQ bit to flip for affected pixels
Returns
-------
Nothing, but updates the ``DQ`` extension of `flt_file` if a mask file
is found
"""
import pyregion
mask_files = glob.glob(flt_file.replace('_flt.fits', '.*.mask.reg').replace('_flc.fits', '.*.mask.reg').replace('_c0m.fits', '.*.mask.reg').replace('_c0f.fits', '.*.mask.reg'))
if len(mask_files) == 0:
return True
logstr = '# Region mask for {0}: {1}'.format(flt_file, mask_files)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
flt = pyfits.open(flt_file, mode='update')
for mask_file in mask_files:
ext = int(mask_file.split('.')[-3])
try:
reg = pyregion.open(mask_file).as_imagecoord(flt['SCI', ext].header)
mask = reg.get_mask(hdu=flt['SCI', ext])
except:
# Above fails for lookup-table distortion (ACS / UVIS)
# Here just assume the region file is defined in image coords
reg = pyregion.open(mask_file)
mask = reg.get_mask(shape=flt['SCI', ext].data.shape)
flt['DQ', ext].data[mask] |= dq_value
flt.flush()
return True
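# Illustrative usage sketch (not part of the original module): a DS9 region
# file named like '<rootname>.01.mask.reg' (the "01" selects SCI extension 1)
# is picked up automatically.  The filenames and region are hypothetical.
# >>> with open('ibhj34h8q.01.mask.reg', 'w') as fp:
# ...     _ = fp.write('image\ncircle(507,507,20)\n')
# >>> apply_region_mask('ibhj34h8q_flt.fits', dq_value=1024)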
def apply_saturated_mask(flt_file, dq_value=1024, verbose=True):
"""Saturated WFC3/IR pixels have some pulldown in the opposite amplifier
Parameters
----------
flt_file : str
Filename of the FLT exposure
dq_value : int
DQ bit to flip for affected pixels
Returns
-------
Nothing, modifies DQ extension of `flt_file` in place.
"""
import scipy.ndimage as nd
flt = pyfits.open(flt_file, mode='update')
sat = (((flt['DQ'].data & 256) > 0) & ((flt['DQ'].data & 4) == 0))
# Don't flag pixels in lower right corner
sat[:80, -80:] = False
# Flag only if a number of nearby pixels also saturated
kern = np.ones((3, 3))
sat_grow = nd.convolve(sat*1, kern)
sat_mask = (sat & (sat_grow > 2))[::-1, :]*1
NSAT = sat_mask.sum()
logstr = '# {0}: flagged {1:d} pixels affected by saturation pulldown'
logstr = logstr.format(flt_file, NSAT)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
if NSAT > 0:
flt['DQ'].data[sat_mask > 0] |= dq_value
flt.flush()
def clip_lists(input, output, clip=20):
"""Clip [x,y] arrays of objects that don't have a match within `clip` pixels in either direction
Parameters
----------
input : (array, array)
Input pixel/array coordinates
output : (array, array)
Output pixel/array coordinates
clip : float
Matching distance
Returns
-------
in_clip, out_clip : (array, array)
Clipped coordinate lists
"""
import scipy.spatial
tree = scipy.spatial.cKDTree(input, 10)
# Forward
N = output.shape[0]
dist, ix = np.zeros(N), np.zeros(N, dtype=int)
for j in range(N):
dist[j], ix[j] = tree.query(output[j, :], k=1,
distance_upper_bound=np.inf)
ok = dist < clip
out_clip = output[ok]
if ok.sum() == 0:
print('No matches within `clip={0:f}`'.format(clip))
return False
# Backward
tree = scipy.spatial.cKDTree(out_clip, 10)
N = input.shape[0]
dist, ix = np.zeros(N), np.zeros(N, dtype=int)
for j in range(N):
dist[j], ix[j] = tree.query(input[j, :], k=1,
distance_upper_bound=np.inf)
ok = dist < clip
in_clip = input[ok]
return in_clip, out_clip
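# Illustrative sketch (not part of the original module): with a small constant
# offset between two synthetic lists, every point survives the clipping.
# >>> import numpy as np
# >>> rng = np.random.RandomState(1)
# >>> xy_in = rng.uniform(0, 1000, size=(50, 2))
# >>> xy_out = xy_in + 2.
# >>> in_clip, out_clip = clip_lists(xy_in, xy_out, clip=20)
# >>> len(in_clip), len(out_clip)
# (50, 50)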
def match_lists(input, output, transform=None, scl=3600., simple=True,
outlier_threshold=5, toler=5, triangle_size_limit=[5, 800],
triangle_ba_max=0.9, assume_close=False):
"""Compute matched objects and transformation between two (x,y) lists.
Parameters
----------
input : (array, array)
Input pixel/array coordinates
output : (array, array)
Output pixel/array coordinates
transform : None, `skimage.transform` object
        Coordinate transformation model. If None, use
        `skimage.transform.SimilarityTransform`, i.e., (shift, scale, rot)
scl : float
Not used
simple : bool
Find matches manually within `outlier_threshold`. If False, find
matches with `skimage.measure.ransac` and the specified `transform`
outlier_threshold : float
Match threshold for ``simple=False``
triangle_size_limit : (float, float)
Size limit of matching triangles, generally set to something of order
of the detector size
triangle_ba_max : float
Maximum length/height ratio of matching triangles
assume_close : bool
Not used
Returns
-------
input_ix : (array, array)
Array indices of matches from `input`
output_ix : (array, array)
Array indices of matches from `output`
outliers : (array, array)
Array indices of outliers
model : transform
Instance of the `transform` object based on the matches
"""
import copy
from astropy.table import Table
import skimage.transform
from skimage.measure import ransac
import stsci.stimage
try:
import tristars
from tristars.match import match_catalog_tri
except ImportError:
print("""
Couldn't `import tristars`. Get it from https://github.com/gbrammer/tristars to enable improved blind astrometric matching with triangle asterisms.
""")
if transform is None:
transform = skimage.transform.SimilarityTransform
# print 'xyxymatch'
if (len(output) == 0) | (len(input) == 0):
print('No entries!')
return input, output, None, transform()
try:
pair_ix = match_catalog_tri(input, output, maxKeep=10, auto_keep=3,
auto_transform=transform,
auto_limit=outlier_threshold,
size_limit=triangle_size_limit,
ignore_rot=False, ignore_scale=True,
ba_max=triangle_ba_max)
input_ix = pair_ix[:, 0]
output_ix = pair_ix[:, 1]
msg = ' tristars.match: Nin={0}, Nout={1}, match={2}'
print(msg.format(len(input), len(output), len(output_ix)))
# if False:
# fig = match.match_diagnostic_plot(input, output, pair_ix, tf=None, new_figure=True)
# fig.savefig('/tmp/xtristars.png')
# plt.close(fig)
#
# tform = get_transform(input, output, pair_ix, transform=transform, use_ransac=True)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# ! tristars failed")
match = stsci.stimage.xyxymatch(copy.copy(input), copy.copy(output),
origin=np.median(input, axis=0),
mag=(1.0, 1.0), rotation=(0.0, 0.0),
ref_origin=np.median(input, axis=0),
algorithm='tolerance',
tolerance=toler,
separation=0.5, nmatch=10,
maxratio=10.0,
nreject=10)
m = Table(match)
output_ix = m['ref_idx'].data
input_ix = m['input_idx'].data
print(' xyxymatch.match: Nin={0}, Nout={1}, match={2}'.format(len(input), len(output), len(output_ix)))
tf = transform()
tf.estimate(input[input_ix, :], output[output_ix])
if not simple:
model, inliers = ransac((input[input_ix, :], output[output_ix, :]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
# Iterate
if inliers.sum() > 2:
m_i, in_i = ransac((input[input_ix[inliers], :],
output[output_ix[inliers], :]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
if in_i.sum() > 2:
model = m_i
inliers[np.arange(len(inliers), dtype=np.int)[inliers][in_i]] = False
outliers = ~inliers
mout = model(input[input_ix, :])
dx = mout - output[output_ix]
else:
model = tf
# Compute statistics
if len(input_ix) > 10:
mout = tf(input[input_ix, :])
dx = mout - output[output_ix]
dr = np.sqrt(np.sum(dx**2, axis=1))
outliers = dr > outlier_threshold
else:
outliers = np.zeros(len(input_ix), dtype=bool)
return input_ix, output_ix, outliers, model
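# Illustrative sketch (not part of the original module): recovering a pure
# translation between two synthetic lists.  Requires `tristars` (or the
# stsci.stimage fallback) to be installed; the exact set of matched pairs
# depends on the random points drawn.
# >>> import numpy as np
# >>> rng = np.random.RandomState(42)
# >>> xy = rng.uniform(0, 1000, size=(40, 2))
# >>> res = match_lists(xy + np.array([5., -3.]), xy, outlier_threshold=5)
# >>> input_ix, output_ix, outliers, tf = res
# >>> tf.translation  # should be close to [-5., 3.]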
def align_drizzled_image(root='',
mag_limits=[14, 23],
radec=None,
NITER=3,
clip=20,
log=True,
outlier_threshold=5,
verbose=True,
guess=[0., 0., 0., 1],
simple=True,
rms_limit=2,
use_guess=False,
triangle_size_limit=[5, 1800],
max_sources=200,
triangle_ba_max=0.9,
max_err_percentile=99,
catalog_mask_pad=0.05,
min_flux_radius=1.,
match_catalog_density=None,
assume_close=False,
ref_border=100):
"""Pipeline for astrometric alignment of drizzled image products
1. Generate source catalog from image mosaics
2. Trim catalog lists
3. Find matches and compute (shift, rot, scale) transform
Parameters
----------
root : str
Image product rootname, passed to `~grizli.prep.make_SEP_catalog`
mag_limits : (float, float)
AB magnitude limits of objects in the image catalog to use for
the alignment
radec : str or (array, array)
Reference catalog positions (ra, dec). If `str`, will read from a
file with `np.loadtxt`, assuming just two columns
NITER : int
Number of matching/transform iterations to perform
clip : float
If positive, then coordinate arrays will be clipped with
`~grizli.prep.clip_lists`.
log : bool
Write results to `wcs.log` file and make a diagnostic figure
verbose : bool
Print status message to console
guess : list
Initial guess for alignment:
>>> guess = [0., 0., 0., 1]
>>> guess = [xshift, yshift, rot, scale]
use_guess : bool
Use the `guess`
rms_limit : float
If transform RMS exceeds this threshold, use null [0,0,0,1] transform
simple : bool
Parameter for `~grizli.prep.match_lists`
outlier_threshold : float
Parameter for `~grizli.prep.match_lists`
triangle_size_limit : (float, float)
Parameter for `~grizli.prep.match_lists`
triangle_ba_max : float
Parameter for `~grizli.prep.match_lists`
max_sources : int
Maximum number of sources to use for the matches. Triangle matching
combinatorics become slow for hundreds of sources
max_err_percentile : float
Only use sources where weight image is greater than this percentile
to try to limit spurious sources in low-weight regions
(`~grizli.utils.catalog_mask`)
catalog_mask_pad : float
Mask sources outside of this fractional size of the image dimensions
        to try to limit spurious sources (`~grizli.utils.catalog_mask`)
match_catalog_density : bool, None
Try to roughly match the surface density of the reference and target
source lists, where the latter is sorted by brightness to try to
reduce spurious triangle matches
assume_close : bool
not used
ref_border : float
Only include reference sources within `ref_border` pixels of the
target image, as calculated from the original image WCS
Returns
-------
orig_wcs : `~astropy.wcs.WCS`
Original WCS
drz_wcs : `~astropy.wcs.WCS`
Transformed WCS
out_shift : (float, float)
Translation, pixels
out_rot : float
Rotation (degrees)
out_scale : float
Scale
"""
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.align_drizzled_image')
if not os.path.exists('{0}.cat.fits'.format(root)):
#cat = make_drz_catalog(root=root)
cat = make_SEP_catalog(root=root)
else:
cat = utils.read_catalog('{0}.cat.fits'.format(root))
if hasattr(radec, 'upper'):
rd_ref = np.loadtxt(radec)
radec_comment = radec
if match_catalog_density is None:
match_catalog_density = '.cat.radec' not in radec
elif radec is False:
# Align to self, i.e., do nothing
so = np.argsort(cat['MAG_AUTO'])
rd_ref = np.array([cat['X_WORLD'], cat['Y_WORLD']]).T[so[:50], :]
radec_comment = 'self catalog'
if match_catalog_density is None:
match_catalog_density = False
else:
rd_ref = radec*1
radec_comment = 'input arrays (N={0})'.format(rd_ref.shape)
if match_catalog_density is None:
match_catalog_density = False
# Clip obviously distant files to speed up match
# rd_cat = np.array([cat['X_WORLD'], cat['Y_WORLD']])
# rd_cat_center = np.median(rd_cat, axis=1)
# cosdec = np.array([np.cos(rd_cat_center[1]/180*np.pi),1])
# dr_cat = np.sqrt(np.sum((rd_cat.T-rd_cat_center)**2*cosdec**2, axis=1))
#
# #print('xxx', rd_ref.shape, rd_cat_center.shape, cosdec.shape)
#
# dr = np.sqrt(np.sum((rd_ref-rd_cat_center)**2*cosdec**2, axis=1))
#
# rd_ref = rd_ref[dr < 1.1*dr_cat.max(),:]
ok = (cat['MAG_AUTO'] > mag_limits[0]) & (cat['MAG_AUTO'] < mag_limits[1])
if len(mag_limits) > 2:
ok &= cat['MAGERR_AUTO'] < mag_limits[2]
else:
ok &= cat['MAGERR_AUTO'] < 0.05
if ok.sum() == 0:
print('{0}.cat: no objects found in magnitude range {1}'.format(root,
mag_limits))
return False
# Edge and error mask
ok &= utils.catalog_mask(cat, max_err_percentile=max_err_percentile,
pad=catalog_mask_pad, pad_is_absolute=False,
min_flux_radius=min_flux_radius)
if max_err_percentile >= 200:
med_err = np.median(cat['FLUXERR_APER_0'][ok])
max_err = med_err*np.sqrt(2)
ok_err = cat['FLUXERR_APER_0'] < max_err
if ok_err.sum() > 5:
ok &= ok_err
xy_drz = np.array([cat['X_IMAGE'][ok], cat['Y_IMAGE'][ok]]).T
drz_file = glob.glob('{0}_dr[zc]_sci.fits'.format(root))[0]
drz_im = pyfits.open(drz_file)
sh = drz_im[0].data.shape
drz_wcs = pywcs.WCS(drz_im[0].header, relax=True)
orig_wcs = drz_wcs.copy()
if use_guess:
drz_wcs = utils.transform_wcs(drz_wcs, guess[:2], guess[2], guess[3])
return orig_wcs, drz_wcs, guess[:2], guess[2]/np.pi*180, guess[3]
##########
# Only include reference objects in the DRZ footprint
pix_origin = 1
ref_x, ref_y = drz_wcs.all_world2pix(rd_ref, pix_origin).T
if hasattr(drz_wcs, '_naxis1'):
nx1, nx2 = drz_wcs._naxis1, drz_wcs._naxis2
else:
nx1, nx2 = drz_wcs._naxis
ref_cut = (ref_x > -ref_border) & (ref_x < nx1+ref_border)
ref_cut &= (ref_y > -ref_border) & (ref_y < nx2+ref_border)
if ref_cut.sum() == 0:
print(f'{root}: no reference objects found in the DRZ footprint')
return False
rd_ref = rd_ref[ref_cut, :]
########
# Match surface density of drizzled and reference catalogs
if match_catalog_density:
icut = np.minimum(ok.sum()-2, int(2*ref_cut.sum()))
# acat = utils.hull_area(cat['X_WORLD'][ok], cat['Y_WORLD'][ok])
# aref = utils.hull_area(rd_ref[:,0], rd_ref[:,1])
cut = np.argsort(cat['MAG_AUTO'][ok])[:icut]
xy_drz = np.array([cat['X_IMAGE'][ok][cut],
cat['Y_IMAGE'][ok][cut]]).T
else:
# Limit to brightest X objects
icut = 400
cut = np.argsort(cat['MAG_AUTO'][ok])[:icut]
xy_drz = np.array([cat['X_IMAGE'][ok][cut],
cat['Y_IMAGE'][ok][cut]]).T
logstr = '# wcs {0} radec="{1}"; Ncat={2}; Nref={3}'
logstr = logstr.format(root, radec, xy_drz.shape[0], rd_ref.shape[0])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
#out_shift, out_rot, out_scale = np.zeros(2), 0., 1.
out_shift, out_rot, out_scale = guess[:2], guess[2], guess[3]
drz_wcs = utils.transform_wcs(drz_wcs, out_shift, out_rot, out_scale)
logstr = '# wcs {0} (guess) : {1:6.2f} {2:6.2f} {3:7.3f} {4:7.3f}'
logstr = logstr.format(root, guess[0], guess[1], guess[2]/np.pi*180,
1./guess[3])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
drz_crpix = drz_wcs.wcs.crpix
NGOOD, rms = 0, 0
for iter in range(NITER):
#print('xx iter {0} {1}'.format(iter, NITER))
xy = np.array(drz_wcs.all_world2pix(rd_ref, pix_origin))
pix = np.cast[int](np.round(xy)).T
# Find objects where drz pixels are non-zero
okp = (pix[0, :] > 0) & (pix[1, :] > 0)
okp &= (pix[0, :] < sh[1]) & (pix[1, :] < sh[0])
ok2 = drz_im[0].data[pix[1, okp], pix[0, okp]] != 0
N = ok2.sum()
if clip > 0:
status = clip_lists(xy_drz-drz_crpix, xy+0-drz_crpix, clip=clip)
            if not status:
                print('No matches found within clip={0}; using unclipped lists'.format(clip))
                input, output = xy_drz+0.-drz_crpix, xy+0-drz_crpix
            else:
                input, output = status
else:
input, output = xy_drz+0.-drz_crpix, xy+0-drz_crpix
if len(input) > max_sources:
msg = 'Clip input list ({0}) to {1} objects'
print(msg.format(len(input), max_sources))
ix = np.argsort(np.arange(len(input)))[:max_sources]
input = input[ix, :]
if len(output) > max_sources:
msg = 'Clip output list ({0}) to {1} objects'
            print(msg.format(len(output), max_sources))
ix = np.argsort(np.arange(len(output)))[:max_sources]
output = output[ix, :]
toler = 5
titer = 0
while (titer < 3):
try:
res = match_lists(output, input, scl=1., simple=simple,
outlier_threshold=outlier_threshold, toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max,
assume_close=assume_close)
output_ix, input_ix, outliers, tf = res
break
except:
toler += 5
titer += 1
#print(output.shape, output_ix.shape, output_ix.min(), output_ix.max(), titer, toler, input_ix.shape, input.shape)
titer = 0
while (len(input_ix)*1./len(input) < 0.1) & (titer < 3):
titer += 1
toler += 5
try:
res = match_lists(output, input, scl=1., simple=simple,
outlier_threshold=outlier_threshold,
toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max,
assume_close=assume_close)
except:
pass
output_ix, input_ix, outliers, tf = res
#print(output.shape, output_ix.shape, output_ix.min(), output_ix.max(), titer, toler, input_ix.shape, input.shape)
tf_out = tf(output[output_ix])
dx = input[input_ix] - tf_out
rms = utils.nmad(np.sqrt((dx**2).sum(axis=1)))
if len(outliers) > 20:
outliers = (np.sqrt((dx**2).sum(axis=1)) > 4*rms)
else:
outliers = (np.sqrt((dx**2).sum(axis=1)) > 10*rms)
if outliers.sum() > 0:
res2 = match_lists(output[output_ix][~outliers],
input[input_ix][~outliers], scl=1.,
simple=simple,
outlier_threshold=outlier_threshold,
toler=toler,
triangle_size_limit=triangle_size_limit,
triangle_ba_max=triangle_ba_max)
output_ix2, input_ix2, outliers2, tf = res2
# Log
shift = tf.translation
NGOOD = (~outliers).sum()
logstr = '# wcs {0} ({1:d}) {2:d}: {3:6.2f} {4:6.2f} {5:7.3f} {6:7.3f}'
logstr = logstr.format(root, iter, NGOOD, shift[0], shift[1],
tf.rotation/np.pi*180, 1./tf.scale)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
out_shift += tf.translation
out_rot -= tf.rotation
out_scale *= tf.scale
drz_wcs = utils.transform_wcs(drz_wcs, tf.translation, tf.rotation,
tf.scale)
# drz_wcs.wcs.crpix += tf.translation
# theta = -tf.rotation
# _mat = np.array([[np.cos(theta), -np.sin(theta)],
# [np.sin(theta), np.cos(theta)]])
#
# drz_wcs.wcs.cd = np.dot(drz_wcs.wcs.cd, _mat)/tf.scale
# Bad fit
if (rms > rms_limit) | (NGOOD < 3):
drz_wcs = orig_wcs
out_shift = [0, 0]
out_rot = 0.
out_scale = 1.
log = False
if log:
tf_out = tf(output[output_ix][~outliers])
dx = input[input_ix][~outliers] - tf_out
rms = utils.nmad(np.sqrt((dx**2).sum(axis=1)))
interactive_status = plt.rcParams['interactive']
plt.ioff()
fig = plt.figure(figsize=[6., 6.])
ax = fig.add_subplot(111)
ax.scatter(dx[:, 0], dx[:, 1], alpha=0.5, color='b')
ax.scatter([0], [0], marker='+', color='red', s=40)
ax.set_xlabel(r'$dx$')
ax.set_ylabel(r'$dy$')
ax.set_title(root)
ax.set_xlim(-7*rms, 7*rms)
ax.set_ylim(-7*rms, 7*rms)
ax.grid()
fig.tight_layout(pad=0.1)
fig.savefig('{0}_wcs.png'.format(root))
plt.close()
if interactive_status:
plt.ion()
log_wcs(root, orig_wcs, out_shift, out_rot/np.pi*180, out_scale, rms,
n=NGOOD, initialize=False,
comment=['radec: {0}'.format(radec_comment)])
return orig_wcs, drz_wcs, out_shift, out_rot/np.pi*180, out_scale
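# Illustrative usage sketch (not part of the original module): align a
# drizzled mosaic '{root}_drz_sci.fits' to an external two-column (ra, dec)
# ascii catalog.  The rootname and catalog filename are hypothetical.
# >>> res = align_drizzled_image(root='j033216m2743', radec='gaia.radec',
# ...                            mag_limits=[16, 23], simple=True)
# >>> orig_wcs, drz_wcs, out_shift, out_rot, out_scale = res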
def update_wcs_fits_log(file, wcs_ref, xyscale=[0, 0, 0, 1], initialize=True, replace=('.fits', '.wcslog.fits'), wcsname='SHIFT'):
"""
Make FITS log when updating WCS
"""
new_hdu = wcs_ref.to_fits(relax=True)[0]
new_hdu.header['XSHIFT'] = xyscale[0]
new_hdu.header['YSHIFT'] = xyscale[1]
new_hdu.header['ROT'] = xyscale[2], 'WCS fit rotation, degrees'
new_hdu.header['SCALE'] = xyscale[3], 'WCS fit scale'
new_hdu.header['WCSNAME'] = wcsname
wcs_logfile = file.replace(replace[0], replace[1])
if os.path.exists(wcs_logfile):
if initialize:
os.remove(wcs_logfile)
hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
else:
hdu = pyfits.open(wcs_logfile)
else:
hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
hdu.append(new_hdu)
hdu.writeto(wcs_logfile, overwrite=True, output_verify='fix')
def log_wcs(root, drz_wcs, shift, rot, scale, rms=0., n=-1, initialize=True, comment=[]):
"""Save WCS offset information to an ascii file
"""
if (not os.path.exists('{0}_wcs.log'.format(root))) | initialize:
print('Initialize {0}_wcs.log'.format(root))
orig_hdul = pyfits.HDUList()
fp = open('{0}_wcs.log'.format(root), 'w')
fp.write('# ext xshift yshift rot scale rms N\n')
for c in comment:
fp.write('# {0}\n'.format(c))
fp.write('# {0}\n'.format(root))
count = 0
else:
orig_hdul = pyfits.open('{0}_wcs.fits'.format(root))
fp = open('{0}_wcs.log'.format(root), 'a')
count = len(orig_hdul)
hdu = drz_wcs.to_fits()[0]
hdu.header['XSHIFT'] = shift[0]
hdu.header['YSHIFT'] = shift[1]
hdu.header['ROT'] = rot, 'WCS fit rotation, degrees'
hdu.header['SCALE'] = scale, 'WCS fit scale'
    hdu.header['FIT_RMS'] = rms, 'WCS fit RMS'
hdu.header['FIT_N'] = n, 'Number of sources in WCS fit'
orig_hdul.append(hdu)
orig_hdul.writeto('{0}_wcs.fits'.format(root), overwrite=True)
fp.write('{0:5d} {1:13.4f} {2:13.4f} {3:13.4f} {4:13.5f} {5:13.3f} {6:4d}\n'.format(
count, shift[0], shift[1], rot, scale, rms, n))
fp.close()
SEXTRACTOR_DEFAULT_PARAMS = ["NUMBER", "X_IMAGE", "Y_IMAGE", "X_WORLD",
"Y_WORLD", "A_IMAGE", "B_IMAGE", "THETA_IMAGE",
"MAG_AUTO", "MAGERR_AUTO", "FLUX_AUTO", "FLUXERR_AUTO",
"FLUX_RADIUS", "BACKGROUND", "FLAGS"]
# Aperture *Diameters*
SEXTRACTOR_PHOT_APERTURES = "6, 8.33335, 11.66667, 16.66667, 20, 25, 50"
SEXTRACTOR_PHOT_APERTURES_ARCSEC = [float(ap)*0.06*u.arcsec for ap in SEXTRACTOR_PHOT_APERTURES.split(',')]
SEXTRACTOR_CONFIG_3DHST = {'DETECT_MINAREA': 14, 'DEBLEND_NTHRESH': 32, 'DEBLEND_MINCONT': 0.005, 'FILTER_NAME': '/usr/local/share/sextractor/gauss_3.0_7x7.conv', 'FILTER': 'Y'}
# /usr/local/share/sextractor/gauss_3.0_7x7.conv
GAUSS_3_7x7 = np.array(
[[0.0049, 0.0213, 0.0513, 0.0687, 0.0513, 0.0213, 0.0049],
[0.0213, 0.0921, 0.2211, 0.2960, 0.2211, 0.0921, 0.0213],
[0.0513, 0.2211, 0.5307, 0.7105, 0.5307, 0.2211, 0.0513],
[0.0687, 0.2960, 0.7105, 0.9511, 0.7105, 0.2960, 0.0687],
[0.0513, 0.2211, 0.5307, 0.7105, 0.5307, 0.2211, 0.0513],
[0.0213, 0.0921, 0.2211, 0.2960, 0.2211, 0.0921, 0.0213],
[0.0049, 0.0213, 0.0513, 0.0687, 0.0513, 0.0213, 0.0049]])
# Try to match 3D-HST detection
SEP_DETECT_PARAMS_3DHST = {'minarea': 9, 'filter_kernel': GAUSS_3_7x7,
'filter_type': 'conv', 'clean': True, 'clean_param': 1,
'deblend_nthresh': 32, 'deblend_cont': 0.005}
# More aggressive deblending
SEP_DETECT_PARAMS = {'minarea': 9, 'filter_kernel': GAUSS_3_7x7,
'filter_type': 'conv', 'clean': True, 'clean_param': 1,
'deblend_nthresh': 32, 'deblend_cont': 0.001}
def make_SEP_FLT_catalog(flt_file, ext=1, column_case=str.upper, **kwargs):
"""
Make a catalog from a FLT file
(Not used)
"""
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
im = pyfits.open(flt_file)
sci = im['SCI', ext].data - im['SCI', ext].header['MDRIZSKY']
err = im['ERR', ext].data
mask = im['DQ', ext].data > 0
ZP = utils.calc_header_zeropoint(im, ext=('SCI', ext))
try:
wcs = pywcs.WCS(im['SCI', ext].header, fobj=im)
except:
wcs = None
tab, seg = make_SEP_catalog_from_arrays(sci, err, mask, wcs=wcs, ZP=ZP, **kwargs)
tab.meta['ABZP'] = ZP
tab.meta['FILTER'] = utils.get_hst_filter(im[0].header)
tab['mag_auto'] = ZP - 2.5*np.log10(tab['flux'])
for c in tab.colnames:
tab.rename_column(c, column_case(c))
return tab, seg
def make_SEP_catalog_from_arrays(sci, err, mask, wcs=None, threshold=2., ZP=25, get_background=True, detection_params=SEP_DETECT_PARAMS, segmentation_map=False, verbose=True):
"""
Make a catalog from arrays using `sep`
Parameters
----------
sci : 2D array
Data array
err : 2D array
Uncertainties in same units as `sci`
mask : bool array
`sep` masks values where ``mask > 0``
wcs : `~astropy.wcs.WCS`
WCS associated with data arrays
    threshold : float
Detection threshold for `sep.extract`
ZP : float
AB magnitude zeropoint of data arrays
get_background : bool
not used
detection_params : dict
Keyword arguments for `sep.extract`
segmentation_map : bool
Also create a segmentation map
verbose : bool
Print status messages
Returns
-------
tab : `~astropy.table.Table`
Source catalog
seg : array, None
Segmentation map, if requested
"""
import copy
import astropy.units as u
try:
import sep
except ImportError:
print("""
Couldn't `import sep`. SExtractor replaced with SEP
in April 2018. Install with `pip install sep`.
""")
logstr = 'make_SEP_catalog_from_arrays: sep version = {0}'.format(sep.__version__)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
if sep.__version__ < '1.1':
print("""!!!!!!!!!!
! SEP version = {0}
! Get >= 1.10.0 to enable segmentation masking, e.g.,
! $ pip install git+https://github.com/gbrammer/sep.git
!!!!!!!!!!
""".format(sep.__version__))
uJy_to_dn = 1/(3631*1e6*10**(-0.4*ZP))
if sci.dtype != np.float32:
sci_data = sci.byteswap().newbyteorder()
else:
sci_data = sci
if err.dtype != np.float32:
err_data = err.byteswap().newbyteorder()
else:
err_data = err
if segmentation_map:
objects, seg = sep.extract(sci_data, threshold, err=err_data,
mask=mask, segmentation_map=True, **detection_params)
else:
objects = sep.extract(sci_data, threshold, err=err_data,
mask=mask, segmentation_map=False, **detection_params)
seg = None
tab = utils.GTable(objects)
if wcs is not None:
tab['ra'], tab['dec'] = wcs.all_pix2world(tab['x'], tab['y'], 0)
tab['ra'].unit = u.deg
tab['dec'].unit = u.deg
tab['x_world'], tab['y_world'] = tab['ra'], tab['dec']
return tab, seg
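# Hypothetical usage sketch for `make_SEP_catalog_from_arrays`; the file names
# and zeropoint are illustrative only.
#
#   >>> import numpy as np
#   >>> import astropy.io.fits as pyfits
#   >>> sci = pyfits.getdata('example_sci.fits')
#   >>> wht = pyfits.getdata('example_wht.fits')
#   >>> err = 1/np.sqrt(wht)
#   >>> mask = (~np.isfinite(err)) | (wht == 0)
#   >>> err[mask] = 0
#   >>> tab, seg = make_SEP_catalog_from_arrays(sci, err, mask, ZP=26.,
#   ...                                         segmentation_map=True)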
def get_SEP_flag_dict():
"""Get dictionary of SEP integer flags
Returns
-------
flags : dict
Dictionary of the integer `sep` detection flags, which are set as
attributes on the `sep` module
"""
try:
import sep
except ImportError:
print("""
Couldn't `import sep`. SExtractor replaced with SEP
in April 2018. Install with `pip install sep`.
""")
flags = OrderedDict()
for f in ['OBJ_MERGED', 'OBJ_TRUNC', 'OBJ_DOVERFLOW', 'OBJ_SINGU',
'APER_TRUNC', 'APER_HASMASKED', 'APER_ALLMASKED',
'APER_NONPOSITIVE']:
flags[f] = getattr(sep, f)
return flags
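# Example sketch of decoding a `sep` flag bitmask with the dictionary returned
# above (the flag value is arbitrary):
#
#   >>> flags = get_SEP_flag_dict()
#   >>> value = 3
#   >>> set_bits = [name for name, bit in flags.items() if value & bit]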
def make_SEP_catalog(root='',
sci=None,
wht=None,
threshold=2.,
get_background=True,
bkg_only=False,
bkg_params={'bw': 32, 'bh': 32, 'fw': 3, 'fh': 3},
verbose=True,
phot_apertures=SEXTRACTOR_PHOT_APERTURES,
aper_segmask=False,
rescale_weight=True,
err_scale=-np.inf,
use_bkg_err=False,
column_case=str.upper,
save_to_fits=True,
include_wcs_extension=True,
source_xy=None,
compute_auto_quantities=True,
autoparams=[2.5, 0.35*u.arcsec, 2.4, 3.8],
flux_radii=[0.2, 0.5, 0.9],
subpix=0,
mask_kron=False,
max_total_corr=2,
detection_params=SEP_DETECT_PARAMS,
bkg_mask=None,
pixel_scale=0.06,
log=False,
gain=2000.,
extract_pixstack=int(3e7),
**kwargs):
"""Make a catalog from drizzle products using the SEP implementation of SourceExtractor
Parameters
----------
root : str
Rootname of the FITS images to use for source extraction. This
function is designed to work with the single-image products from
`drizzlepac`, so the default data/science image is searched by
>>> drz_file = glob.glob(f'{root}_dr[zc]_sci.fits*')[0]
Note that this will find and use gzipped versions of the images,
if necessary.
The associated weight image filename is then assumed to be
>>> weight_file = drz_file.replace('_sci.fits', '_wht.fits')
>>> weight_file = weight_file.replace('_drz.fits', '_wht.fits')
sci, wht : str
Filenames to override `drz_file` and `weight_file` derived from the
``root`` parameter.
threshold : float
Detection threshold for `sep.extract`
get_background : bool
Compute the background with `sep.Background`
bkg_only : bool
If `True`, then just return the background data array and don't run
the source detection
bkg_params : dict
Keyword arguments for `sep.Background`. Note that this can include
a separate optional keyword ``pixel_scale`` that indicates that the
        background sizes `bw`, `bh` are set for a particular pixel size.
They will be scaled to the pixel dimensions of the target images using
the pixel scale derived from the image WCS.
verbose : bool
Print status messages
phot_apertures : str or array-like
Photometric aperture *diameters*. If given as a string then assume
units of pixels. If an array or list, can have units, e.g.,
`astropy.units.arcsec`.
aper_segmask : bool
If true, then run SEP photometry with segmentation masking. This
requires the sep fork at https://github.com/gbrammer/sep.git,
or `sep >= 1.10.0`.
rescale_weight : bool
If true, then a scale factor is calculated from the ratio of the
weight image to the variance estimated by `sep.Background`.
err_scale : float
Explicit value to use for the weight scaling, rather than calculating
with `rescale_weight`. Only used if ``err_scale > 0``
use_bkg_err : bool
If true, then use the full error array derived by `sep.Background`.
This is turned off by default in order to preserve the pixel-to-pixel
variation in the drizzled weight maps.
column_case : func
Function to apply to the catalog column names. E.g., the default
`str.upper` results in uppercase column names
save_to_fits : bool
Save catalog FITS file ``{root}.cat.fits``
include_wcs_extension : bool
An extension will be added to the FITS catalog with the detection
image WCS
source_xy : (x, y) or (ra, dec) arrays
Force extraction positions. If the arrays have units, then pass them
through the header WCS. If no units, positions are *zero indexed*
array coordinates.
        To run with segmentation masking (``sep >= 1.10``), also provide
`aseg` and `aseg_id` arrays with `source_xy`, like
>>> source_xy = ra, dec, aseg, aseg_id
compute_auto_quantities : bool
Compute Kron/auto-like quantities with
`~grizli.prep.compute_SEP_auto_params`
autoparams : list
Parameters of Kron/AUTO calculations with
`~grizli.prep.compute_SEP_auto_params`.
flux_radii : list
Light fraction radii to compute with
`~grizli.prep.compute_SEP_auto_params`, e.g., ``[0.5]`` will calculate
the half-light radius (``FLUX_RADIUS``)
subpix : int
Pixel oversampling
mask_kron : bool
Not used
max_total_corr : float
Not used
detection_params : dict
Parameters passed to `sep.extract`
bkg_mask : array
Additional mask to apply to `sep.Background` calculation
pixel_scale : float
Not used
log : bool
Send log message to `grizli.utils.LOGFILE`
gain : float
Gain value passed to `sep.sum_circle`
extract_pixstack : int
See `sep.set_extract_pixstack`
Returns
-------
tab : `~astropy.table.Table`
Source catalog
"""
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.make_SEP_catalog', verbose=True)
import copy
import astropy.units as u
try:
import sep
except ImportError:
print("""
Couldn't `import sep`. SExtractor replaced with SEP
in April 2018. Install with `pip install sep`.
""")
sep.set_extract_pixstack(extract_pixstack)
logstr = 'make_SEP_catalog: sep version = {0}'.format(sep.__version__)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
if sep.__version__ < '1.1':
print("""!!!!!!!!!!
! SEP version = {0}
! Get >= 1.10.0 to enable segmentation masking, e.g.,
! $ pip install git+https://github.com/gbrammer/sep.git
!!!!!!!!!!
""".format(sep.__version__))
if sci is not None:
drz_file = sci
else:
drz_file = glob.glob(f'{root}_dr[zc]_sci.fits*')[0]
im = pyfits.open(drz_file)
# Filter
drz_filter = utils.get_hst_filter(im[0].header)
if 'PHOTPLAM' in im[0].header:
drz_photplam = im[0].header['PHOTPLAM']
else:
drz_photplam = None
# Get AB zeropoint
ZP = utils.calc_header_zeropoint(im, ext=0)
logstr = 'sep: Image AB zeropoint = {0:.3f}'.format(ZP)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose, show_date=True)
    # Scale fluxes to micro-Jy
uJy_to_dn = 1/(3631*1e6*10**(-0.4*ZP))
if wht is not None:
weight_file = wht
else:
weight_file = drz_file.replace('_sci.fits', '_wht.fits')
weight_file = weight_file.replace('_drz.fits', '_wht.fits')
if (weight_file == drz_file) | (not os.path.exists(weight_file)):
WEIGHT_TYPE = "NONE"
weight_file = None
else:
WEIGHT_TYPE = "MAP_WEIGHT"
drz_im = pyfits.open(drz_file)
data = drz_im[0].data.byteswap().newbyteorder()
try:
wcs = pywcs.WCS(drz_im[0].header)
wcs_header = utils.to_header(wcs)
pixel_scale = utils.get_wcs_pscale(wcs) # arcsec
except:
wcs = None
wcs_header = drz_im[0].header.copy()
pixel_scale = np.sqrt(wcs_header['CD1_1']**2+wcs_header['CD1_2']**2)
pixel_scale *= 3600. # arcsec
# Add some header keywords to the wcs header
for k in ['EXPSTART', 'EXPEND', 'EXPTIME']:
if k in drz_im[0].header:
wcs_header[k] = drz_im[0].header[k]
if isinstance(phot_apertures, str):
apertures = np.cast[float](phot_apertures.replace(',', '').split())
else:
apertures = []
for ap in phot_apertures:
if hasattr(ap, 'unit'):
apertures.append(ap.to(u.arcsec).value/pixel_scale)
else:
apertures.append(ap)
# Do we need to compute the error from the wht image?
need_err = (not use_bkg_err) | (not get_background)
if (weight_file is not None) & need_err:
wht_im = pyfits.open(weight_file)
wht_data = wht_im[0].data.byteswap().newbyteorder()
err = 1/np.sqrt(wht_data)
del(wht_data)
# True mask pixels are masked with sep
mask = (~np.isfinite(err)) | (err == 0) | (~np.isfinite(data))
err[mask] = 0
wht_im.close()
del(wht_im)
else:
# True mask pixels are masked with sep
mask = (data == 0) | (~np.isfinite(data))
err = None
try:
drz_im.close()
del(drz_im)
except:
pass
data_mask = np.cast[data.dtype](mask)
if get_background | (err_scale < 0) | (use_bkg_err):
# Account for pixel scale in bkg_params
bkg_input = {}
if 'pixel_scale' in bkg_params:
bkg_pscale = bkg_params['pixel_scale']
else:
bkg_pscale = pixel_scale
for k in bkg_params:
if k in ['pixel_scale']:
continue
if k in ['bw', 'bh']:
bkg_input[k] = bkg_params[k]*bkg_pscale/pixel_scale
else:
bkg_input[k] = bkg_params[k]
logstr = 'SEP: Get background {0}'.format(bkg_input)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose,
show_date=True)
if bkg_mask is not None:
bkg = sep.Background(data, mask=mask | bkg_mask, **bkg_input)
else:
bkg = sep.Background(data, mask=mask, **bkg_input)
bkg_data = bkg.back()
if bkg_only:
return bkg_data
if get_background == 2:
bkg_file = '{0}_bkg.fits'.format(root)
if os.path.exists(bkg_file):
logstr = 'SEP: use background file {0}'.format(bkg_file)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose,
show_date=True)
bkg_im = pyfits.open('{0}_bkg.fits'.format(root))
bkg_data = bkg_im[0].data*1
else:
pyfits.writeto('{0}_bkg.fits'.format(root), data=bkg_data,
header=wcs_header, overwrite=True)
if (err is None) | use_bkg_err:
logstr = 'sep: Use bkg.rms() for error array'
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose,
show_date=True)
err = bkg.rms()
if err_scale == -np.inf:
ratio = bkg.rms()/err
err_scale = np.median(ratio[(~mask) & np.isfinite(ratio)])
else:
# Just return the error scale
if err_scale < 0:
ratio = bkg.rms()/err
xerr_scale = np.median(ratio[(~mask) & np.isfinite(ratio)])
del(bkg)
return xerr_scale
del(bkg)
else:
if err_scale is None:
err_scale = 1.
if not get_background:
bkg_data = 0.
data_bkg = data
else:
data_bkg = data - bkg_data
if rescale_weight:
if verbose:
print('SEP: err_scale={:.3f}'.format(err_scale))
err *= err_scale
if source_xy is None:
# Run the detection
if verbose:
print(' SEP: Extract...')
objects, seg = sep.extract(data_bkg, threshold, err=err,
mask=mask, segmentation_map=True,
**detection_params)
if verbose:
print(' Done.')
tab = utils.GTable(objects)
tab.meta['VERSION'] = (sep.__version__, 'SEP version')
# make unit-indexed like SExtractor
tab['x_image'] = tab['x']+1
tab['y_image'] = tab['y']+1
# ID
tab['number'] = np.arange(len(tab), dtype=np.int32)+1
tab['theta'] = np.clip(tab['theta'], -np.pi/2, np.pi/2)
for c in ['a', 'b', 'x', 'y', 'x_image', 'y_image', 'theta']:
tab = tab[np.isfinite(tab[c])]
# Segmentation
seg[mask] = 0
pyfits.writeto('{0}_seg.fits'.format(root), data=seg,
header=wcs_header, overwrite=True)
# WCS coordinates
if wcs is not None:
tab['ra'], tab['dec'] = wcs.all_pix2world(tab['x'], tab['y'], 0)
tab['ra'].unit = u.deg
tab['dec'].unit = u.deg
tab['x_world'], tab['y_world'] = tab['ra'], tab['dec']
if 'minarea' in detection_params:
tab.meta['MINAREA'] = (detection_params['minarea'],
'Minimum source area in pixels')
else:
tab.meta['MINAREA'] = (5, 'Minimum source area in pixels')
if 'clean' in detection_params:
tab.meta['CLEAN'] = (detection_params['clean'],
'Detection cleaning')
else:
tab.meta['CLEAN'] = (True, 'Detection cleaning')
if 'deblend_cont' in detection_params:
tab.meta['DEBCONT'] = (detection_params['deblend_cont'],
'Deblending contrast ratio')
else:
tab.meta['DEBCONT'] = (0.005, 'Deblending contrast ratio')
if 'deblend_nthresh' in detection_params:
tab.meta['DEBTHRSH'] = (detection_params['deblend_nthresh'],
'Number of deblending thresholds')
else:
tab.meta['DEBTHRSH'] = (32, 'Number of deblending thresholds')
if 'filter_type' in detection_params:
tab.meta['FILTER_TYPE'] = (detection_params['filter_type'],
'Type of filter applied, conv or weight')
else:
tab.meta['FILTER_TYPE'] = ('conv',
'Type of filter applied, conv or weight')
tab.meta['THRESHOLD'] = (threshold, 'Detection threshold')
# ISO fluxes (flux within segments)
iso_flux, iso_fluxerr, iso_area = get_seg_iso_flux(data_bkg, seg, tab,
err=err, verbose=1)
tab['flux_iso'] = iso_flux/uJy_to_dn*u.uJy
tab['fluxerr_iso'] = iso_fluxerr/uJy_to_dn*u.uJy
tab['area_iso'] = iso_area
tab['mag_iso'] = 23.9-2.5*np.log10(tab['flux_iso'])
# Compute FLUX_AUTO, FLUX_RADIUS
if compute_auto_quantities:
auto = compute_SEP_auto_params(data, data_bkg, mask,
pixel_scale=pixel_scale,
err=err, segmap=seg, tab=tab,
autoparams=autoparams, flux_radii=flux_radii,
subpix=subpix, verbose=verbose)
for k in auto.meta:
tab.meta[k] = auto.meta[k]
auto_flux_cols = ['flux_auto', 'fluxerr_auto', 'bkg_auto']
for c in auto.colnames:
if c in auto_flux_cols:
tab[c] = auto[c]/uJy_to_dn*u.uJy
else:
tab[c] = auto[c]
# Problematic sources
# bad = (tab['flux_auto'] <= 0) | (tab['flux_radius'] <= 0)
# bad |= (tab['kron_radius'] <= 0)
# tab = tab[~bad]
# Correction for flux outside Kron aperture
tot_corr = get_wfc3ir_kron_tot_corr(tab, drz_filter,
pixel_scale=pixel_scale,
photplam=drz_photplam)
tab['tot_corr'] = tot_corr
tab.meta['TOTCFILT'] = (drz_filter, 'Filter for tot_corr')
tab.meta['TOTCWAVE'] = (drz_photplam, 'PLAM for tot_corr')
total_flux = tab['flux_auto']*tot_corr
tab['mag_auto'] = 23.9-2.5*np.log10(total_flux)
tab['magerr_auto'] = 2.5/np.log(10)*(tab['fluxerr_auto']/tab['flux_auto'])
# More flux columns
for c in ['cflux', 'flux', 'peak', 'cpeak']:
tab[c] *= 1. / uJy_to_dn
tab[c].unit = u.uJy
source_x, source_y = tab['x'], tab['y']
# Use segmentation image to mask aperture fluxes
if aper_segmask:
aseg = seg
aseg_id = tab['number']
else:
aseg = aseg_id = None
# Rename some columns to look like SExtractor
for c in ['a', 'b', 'theta', 'cxx', 'cxy', 'cyy', 'x2', 'y2', 'xy']:
tab.rename_column(c, c+'_image')
else:
if len(source_xy) == 2:
source_x, source_y = source_xy
aseg, aseg_id = None, None
aper_segmask = False
else:
source_x, source_y, aseg, aseg_id = source_xy
aper_segmask = True
if hasattr(source_x, 'unit'):
if source_x.unit == u.deg:
# Input positions are ra/dec, convert with WCS
ra, dec = source_x, source_y
source_x, source_y = wcs.all_world2pix(ra, dec, 0)
tab = utils.GTable()
tab.meta['VERSION'] = (sep.__version__, 'SEP version')
# Info
tab.meta['ZP'] = (ZP, 'AB zeropoint')
if 'PHOTPLAM' in im[0].header:
        tab.meta['PLAM'] = (im[0].header['PHOTPLAM'], 'Filter pivot wavelength')
        if 'PHOTFNU' in im[0].header:
            tab.meta['FNU'] = (im[0].header['PHOTFNU'], 'Inverse sensitivity, PHOTFNU')
        tab.meta['FLAM'] = (im[0].header['PHOTFLAM'], 'Inverse sensitivity, PHOTFLAM')
tab.meta['uJy2dn'] = (uJy_to_dn, 'Convert uJy fluxes to image DN')
tab.meta['DRZ_FILE'] = (drz_file, 'SCI file')
tab.meta['WHT_FILE'] = (weight_file, 'WHT file')
tab.meta['GET_BACK'] = (get_background, 'Background computed')
for k in bkg_params:
tab.meta[f'BACK_{k.upper()}'] = (bkg_params[k],
f'Background param {k}')
tab.meta['ERR_SCALE'] = (err_scale, 'Scale factor applied to weight image (like MAP_WEIGHT)')
tab.meta['RESCALEW'] = (rescale_weight, 'Was the weight applied?')
tab.meta['APERMASK'] = (aper_segmask, 'Mask apertures with seg image')
# Photometry
for iap, aper in enumerate(apertures):
if sep.__version__ > '1.03':
# Should work with the sep fork at gbrammer/sep and latest sep
flux, fluxerr, flag = sep.sum_circle(data_bkg,
source_x, source_y,
aper/2, err=err,
gain=gain, subpix=subpix,
segmap=aseg, seg_id=aseg_id,
mask=mask)
else:
tab.meta['APERMASK'] = (False, 'Mask apertures with seg image - Failed')
flux, fluxerr, flag = sep.sum_circle(data_bkg,
source_x, source_y,
aper/2, err=err,
gain=gain, subpix=subpix,
mask=mask)
tab.meta['GAIN'] = gain
tab['flux_aper_{0}'.format(iap)] = flux/uJy_to_dn*u.uJy
tab['fluxerr_aper_{0}'.format(iap)] = fluxerr/uJy_to_dn*u.uJy
tab['flag_aper_{0}'.format(iap)] = flag
if get_background:
try:
flux, fluxerr, flag = sep.sum_circle(bkg_data,
source_x, source_y,
aper/2, err=None, gain=1.0,
segmap=aseg, seg_id=aseg_id,
mask=mask)
except:
flux, fluxerr, flag = sep.sum_circle(bkg_data,
source_x, source_y,
aper/2, err=None, gain=1.0,
mask=mask)
tab['bkg_aper_{0}'.format(iap)] = flux/uJy_to_dn*u.uJy
else:
tab['bkg_aper_{0}'.format(iap)] = 0.*u.uJy
# Count masked pixels in the aperture, not including segmask
flux, fluxerr, flag = sep.sum_circle(data_mask,
source_x, source_y,
aper/2, err=err,
gain=gain, subpix=subpix)
tab['mask_aper_{0}'.format(iap)] = flux
tab.meta['aper_{0}'.format(iap)] = (aper, 'Aperture diameter, pix')
tab.meta['asec_{0}'.format(iap)] = (aper*pixel_scale,
'Aperture diameter, arcsec')
# # If blended, use largest aperture magnitude
# if 'flag' in tab.colnames:
# last_flux = tab['flux_aper_{0}'.format(iap)]
# last_fluxerr = tab['fluxerr_aper_{0}'.format(iap)]
#
# blended = (tab['flag'] & 1) > 0
#
# total_corr = tab['flux_auto']/last_flux
# blended |= total_corr > max_total_corr
#
# tab['flag'][blended] |= 1024
#
# aper_mag = 23.9 - 2.5*np.log10(last_flux)
# aper_magerr = 2.5/np.log(10)*last_fluxerr/last_flux
#
# tab['mag_auto'][blended] = aper_mag[blended]
# tab['magerr_auto'][blended] = aper_magerr[blended]
#
# # "ISO" mag, integrated within the segment
# tab['mag_iso'] = 23.9-2.5*np.log10(tab['flux'])
try:
# Free memory objects explicitly
del(data_mask)
del(data)
del(err)
except:
pass
# if uppercase_columns:
for c in tab.colnames:
tab.rename_column(c, column_case(c))
if save_to_fits:
tab.write(f'{root}.cat.fits', format='fits', overwrite=True)
if include_wcs_extension:
try:
hdul = pyfits.open(f'{root}.cat.fits', mode='update')
wcs_hdu = pyfits.ImageHDU(header=wcs_header, data=None,
name='WCS')
hdul.append(wcs_hdu)
hdul.flush()
except:
pass
logstr = '# SEP {0}.cat.fits: {1:d} objects'.format(root, len(tab))
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
return tab
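# Hypothetical usage sketch for `make_SEP_catalog`; the rootname is
# illustrative and assumes 'example-f140w_drz_sci.fits' and
# 'example-f140w_drz_wht.fits' mosaics exist in the working directory.
#
#   >>> phot = make_SEP_catalog(root='example-f140w', threshold=1.8,
#   ...                         phot_apertures=SEXTRACTOR_PHOT_APERTURES_ARCSEC)
#   >>> print(phot['NUMBER', 'MAG_AUTO', 'FLUX_RADIUS'][:5])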
def get_seg_iso_flux(data, seg, tab, err=None, fill=None, verbose=0):
"""
Integrate flux within the segmentation regions.
Parameters
----------
data : 2D array
Image data
seg : 2D array
Segmentation image defining the ISO contours
tab : `~astropy.table.Table`
Detection catalog with columns (at least) ``number / id``,
``xmin``, ``xmax``, ``ymin``, ``ymax``. The ``id`` column matches the
values in `seg`.
err : 2D array
Optional uncertainty array
fill : None, array
If specified, create an image where the image segments are filled
with scalar values for a given object rather than computing the ISO
fluxes
verbose : bool/int
Status messages
Returns
-------
iso_flux : array
Summed image flux within the contours defined by the `seg` map
iso_err : array
Uncertainty if `err` specified
iso_area : array
Area of the segments, in pixels
filled_data : 2D array
If `fill` specified, return an image with values filled within the
segments, e.g., for a binned image
"""
if 'number' in tab.colnames:
ids = np.array(tab['number'])
else:
ids = np.array(tab['id'])
sh = data.shape
iso_flux = ids*0.
iso_err = ids*0.
iso_area = np.cast[int](ids*0)
xmin = np.clip(tab['xmin'], 0, sh[1])
xmax = np.clip(tab['xmax'], 0, sh[1])
ymin = np.clip(tab['ymin'], 0, sh[0])
ymax = np.clip(tab['ymax'], 0, sh[0])
if fill is not None:
filled_data = np.cast[fill.dtype](seg*0)
for ii, id in enumerate(ids):
if (verbose > 1):
if (ii % verbose == 0):
print(' {0}'.format(ii))
slx = slice(xmin[ii], xmax[ii])
sly = slice(ymin[ii], ymax[ii])
seg_sub = seg[sly, slx]
seg_mask = (seg_sub == id)
if fill is not None:
#print(ii, seg_mask.sum())
filled_data[sly, slx][seg_mask] = fill[ii]
else:
data_sub = data[sly, slx]
iso_flux[ii] = data_sub[seg_mask].sum()
iso_area[ii] = seg_mask.sum()
if err is not None:
err_sub = err[sly, slx]
iso_err[ii] = np.sqrt((err_sub[seg_mask]**2).sum())
if fill is not None:
return filled_data
else:
return iso_flux, iso_err, iso_area
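# Sketch of the `fill` mode of `get_seg_iso_flux`, which paints a per-object
# scalar into the segmentation footprints instead of summing fluxes, e.g., to
# visualize a catalog quantity as an image (array names are hypothetical):
#
#   >>> filled = get_seg_iso_flux(data, seg, tab, fill=np.array(tab['flux']))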
def compute_SEP_auto_params(data, data_bkg, mask, pixel_scale=0.06, err=None, segmap=None, tab=None, grow_kron=6.0, autoparams=[2.5, 0.35*u.arcsec, 2.4, 3.8], flux_radii=[0.2, 0.5, 0.9], subpix=0, verbose=True):
"""Compute SourceExtractor-like AUTO params with `sep`
https://sep.readthedocs.io/en/v1.0.x/apertures.html#equivalent-of-flux-auto-e-g-mag-auto-in-source-extractor
Parameters
----------
data : 2D array
Image data
data_bkg : 2D array
Background-subtracted image data
mask : 2D array
Pixel mask
pixel_scale : float
Pixel scale, arcsec
err : 2D array
Uncertainty array
segmap : 2D array
Associated segmentation map
tab : `~astropy.table.Table`
Table from, e.g., `sep.extract`.
grow_kron : float
Scale by which semimajor and semiminor axes are multiplied for
calculating the Kron moment. This is hard-coded as `grow_kron=6.0`
in `SourceExtractor <https://sextractor.readthedocs.io/en/latest/Photom.html>`_.
autoparams : list
Provided as ``[k, MIN_APER, MIN_KRON, MAX_KRON]``, where the usual
SourceExtractor ``PHOT_AUTOPARAMS`` would be ``[k, MIN_KRON]``. Here,
``k`` is the scale factor of the Kron radius, and ``MIN_KRON`` is the
minimum scaled Kron radius to use. ``MIN_APER`` is then the smallest
*circular* Kron aperture to allow, which can be provided with attached
units (e.g. ``arcsec``).
flux_radii : list
Light fraction radii, e.g., ``[0.5]`` will calculate the half-light
radius (``FLUX_RADIUS``)
subpix : int
Pixel oversampling with the `sep` aperture functions
Returns
-------
auto : `~astropy.table.Table`
Table with the derived parameters
+--------------------------+----------------------------------------+
| Column | Description |
+==========================+========================================+
| `kron_radius` | Kron radius, pixels |
+--------------------------+----------------------------------------+
| `kron_rcirc` | Circularized Kron radius, pixels |
+--------------------------+----------------------------------------+
| `flux_auto` | Flux within AUTO aperture |
+--------------------------+----------------------------------------+
| `fluxerr_auto` | Uncertainty within AUTO aperture |
+--------------------------+----------------------------------------+
| `bkg_auto` | Background within AUTO aperture |
+--------------------------+----------------------------------------+
| `flag_auto` | `sep` flags for AUTO aperture |
+--------------------------+----------------------------------------+
| `area_auto` | Pixel area of AUTO aperture |
+--------------------------+----------------------------------------+
"""
import sep
logstr = 'compute_SEP_auto_params: sep version = {0}'.format(sep.__version__)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
if sep.__version__ < '1.1':
print("""!!!!!!!!!!
! SEP version = {0}
! Get >= 1.10.0 to enable segmentation masking, e.g.,
! $ pip install git+https://github.com/gbrammer/sep.git
!!!!!!!!!!
""".format(sep.__version__))
logstr = 'compute_SEP_auto_params: autoparams={0}; pixel_scale={1}; subpix={2}; flux_radii={3}'.format(autoparams, pixel_scale, subpix, flux_radii)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
# Check datatype of seg
segb = segmap
if segmap is not None:
if segmap.dtype == np.dtype('>i4'):
segb = segmap.byteswap().newbyteorder()
if 'a_image' in tab.colnames:
x, y = tab['x_image']-1, tab['y_image']-1
a, b = tab['a_image'], tab['b_image']
theta = tab['theta_image']
else:
x, y, a, b = tab['x'], tab['y'], tab['a'], tab['b']
theta = tab['theta']
if 'number' in tab.colnames:
seg_id = tab['number']
else:
seg_id = tab['id']
# Kron radius
try:
# Try with seg mask (sep > v1.0.4)
kronrad, krflag = sep.kron_radius(data_bkg, x, y, a, b, theta,
grow_kron, mask=mask,
segmap=segb, seg_id=seg_id)
kronrad[~np.isfinite(kronrad)] = 0
except:
logstr = 'sep.kron_radius: ! Warning ! couldn\'t run with seg mask'
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
kronrad, krflag = sep.kron_radius(data_bkg, x, y, a, b, theta,
grow_kron, mask=mask)
# This is like SExtractor PHOT_AUTOPARAMS[0]
kronrad *= autoparams[0]
# This is like SExtractor PHOT_AUTOPARAMS[1]
if len(autoparams) > 2:
clip_kron0 = autoparams[2]
kronrad = np.maximum(kronrad, clip_kron0)
else:
clip_kron0 = 0.
if len(autoparams) > 3:
clip_kron1 = autoparams[3]
kronrad = np.minimum(kronrad, clip_kron1)
else:
clip_kron1 = 1000.
# Circularized Kron radius (equivalent to kronrad * a * sqrt(b/a))
kroncirc = kronrad * np.sqrt(a*b)
# Minimum Kron radius
if hasattr(autoparams[1], 'unit'):
min_radius_pix = autoparams[1].to(u.arcsec).value/pixel_scale
else:
# Assume arcsec
min_radius_pix = autoparams[1]/pixel_scale
kron_min = kroncirc <= min_radius_pix
kron_flux = x*0.
kron_bkg = x*0.
kron_fluxerr = x*0.
kron_flag = np.zeros(len(x), dtype=int)
kron_area = np.pi*np.maximum(kroncirc, min_radius_pix)**2
########
# Ellipse photometry in apertures larger than the minimum
# Extract on both data and background subtracted to compute the
# background within the aperture
try:
# Try with seg mask (sep=>v1.0.4)
kout0 = sep.sum_ellipse(data, x[~kron_min], y[~kron_min],
a[~kron_min], b[~kron_min], theta[~kron_min],
kronrad[~kron_min], subpix=subpix, err=None,
segmap=segb, seg_id=seg_id[~kron_min],
mask=mask)
kout = sep.sum_ellipse(data_bkg, x[~kron_min], y[~kron_min],
a[~kron_min], b[~kron_min], theta[~kron_min],
kronrad[~kron_min], subpix=subpix, err=err,
segmap=segb, seg_id=seg_id[~kron_min], mask=mask)
except:
kout0 = sep.sum_ellipse(data_bkg, x[~kron_min], y[~kron_min],
a[~kron_min], b[~kron_min], theta[~kron_min],
kronrad[~kron_min], subpix=subpix, err=None,
mask=mask)
kout = sep.sum_ellipse(data_bkg, x[~kron_min], y[~kron_min],
a[~kron_min], b[~kron_min], theta[~kron_min],
kronrad[~kron_min], subpix=subpix, err=err,
mask=mask)
kron_flux[~kron_min] = kout[0]
kron_bkg[~kron_min] = kout0[0]-kout[0]
kron_fluxerr[~kron_min] = kout[1]
kron_flag[~kron_min] = kout[2]
# Circular apertures below minimum size
try:
# Try with seg mask (sep v1.0.4)
kout0 = sep.sum_circle(data, x[kron_min], y[kron_min],
min_radius_pix, subpix=subpix, err=None,
segmap=segb, seg_id=seg_id[kron_min], mask=mask)
kout = sep.sum_circle(data_bkg, x[kron_min], y[kron_min],
min_radius_pix, subpix=subpix, err=err,
segmap=segb, seg_id=seg_id[kron_min], mask=mask)
except:
kout0 = sep.sum_circle(data, x[kron_min], y[kron_min],
min_radius_pix, subpix=subpix, err=None,
mask=mask)
kout = sep.sum_circle(data_bkg, x[kron_min], y[kron_min],
min_radius_pix, subpix=subpix, err=err,
mask=mask)
kron_flux[kron_min] = kout[0]
kron_bkg[kron_min] = kout0[0]-kout[0]
kron_fluxerr[kron_min] = kout[1]
kron_flag[kron_min] = kout[2]
#############
# Flux radius
try:
fr, fr_flag = sep.flux_radius(data_bkg, x, y, a*grow_kron, flux_radii,
normflux=kron_flux, mask=mask,
segmap=segb, seg_id=seg_id)
except:
fr, fr_flag = sep.flux_radius(data_bkg, x, y, a*grow_kron, flux_radii,
normflux=kron_flux, mask=mask)
auto = utils.GTable()
auto.meta['KRONFACT'] = (autoparams[0], 'Kron radius scale factor')
auto.meta['KRON0'] = (clip_kron0, 'Minimum scaled Kron radius')
auto.meta['KRON1'] = (clip_kron1, 'Maximum scaled Kron radius')
auto.meta['MINKRON'] = (min_radius_pix, 'Minimum Kron aperture, pix')
auto['kron_radius'] = kronrad*u.pixel
auto['kron_rcirc'] = kroncirc*u.pixel
auto['flux_auto'] = kron_flux
auto['fluxerr_auto'] = kron_fluxerr
auto['bkg_auto'] = kron_bkg
auto['flag_auto'] = kron_flag
auto['area_auto'] = kron_area
auto['flux_radius_flag'] = fr_flag
for i, r_i in enumerate(flux_radii):
c = 'flux_radius_{0:02d}'.format(int(np.round(r_i*100)))
if c.endswith('_50'):
c = 'flux_radius'
auto[c] = fr[:, i]
return auto
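# The AUTO aperture above follows the SourceExtractor convention: the Kron
# radius from `sep.kron_radius` is scaled by autoparams[0], clipped between
# autoparams[2] and autoparams[3], and circularized as
# R_circ = R_kron * sqrt(a*b).  A minimal sketch of calling it on the outputs
# of `sep.extract` (array names are hypothetical):
#
#   >>> auto = compute_SEP_auto_params(data, data_bkg, mask, pixel_scale=0.06,
#   ...                                err=err, segmap=seg, tab=tab)
#   >>> print(auto['flux_auto', 'kron_radius', 'flux_radius'][:3])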
def get_filter_ee_ratio(tab, filter, ref_filter='f160w'):
"""
Relative EE correction within a specified aperture, in arcsec.
"""
pixel_scale = tab.meta['ASEC_0']/tab.meta['APER_0']
min_kron = tab.meta['MINKRON']*pixel_scale
ee = utils.read_catalog((os.path.join(os.path.dirname(utils.__file__),
'data', 'hst_encircled_energy.fits')))
# Reference filter
ref_obsmode = utils.get_filter_obsmode(filter=ref_filter, acs_chip='wfc1',
uvis_chip='uvis2', aper=np.inf,
case=str.lower)
# Target filter
obsmode = utils.get_filter_obsmode(filter=filter, acs_chip='wfc1',
uvis_chip='uvis2', aper=np.inf,
case=str.lower)
# Filter not available
if obsmode not in ee.colnames:
return 1.
# Ratio of photometric aperture to kron aperture
keys = list(tab.meta.keys())
for ka in keys:
if ka.startswith('APER_'):
ap = ka.split('_')[1]
aper_radius = tab.meta[f'ASEC_{ap}']/2.
kron_circ = np.maximum(tab['kron_rcirc']*pixel_scale, min_kron)
filt_kron = np.interp(kron_circ, ee['radius'], ee[obsmode])
filt_aper = np.interp(aper_radius, ee['radius'], ee[obsmode])
ref_kron = np.interp(kron_circ, ee['radius'], ee[ref_obsmode])
ref_aper = np.interp(aper_radius, ee['radius'], ee[ref_obsmode])
filter_correction = (filt_kron/filt_aper) / (ref_kron/ref_aper)
tab[f'{filter}_ee_{ap}'] = filter_correction
return tab
def get_hst_aperture_correction(filter, raper=0.35, rmax=5.):
"""
Get single aperture correction from tabulated EE curve
"""
ee = utils.read_catalog((os.path.join(os.path.dirname(__file__),
'data', 'hst_encircled_energy.fits')))
obsmode = utils.get_filter_obsmode(filter=filter, acs_chip='wfc1',
uvis_chip='uvis2', aper=np.inf,
case=str.lower)
ee_rad = np.append(ee['radius'], rmax)
ee_y = np.append(ee[obsmode], 1.)
ee_interp = np.interp(raper, ee_rad, ee_y, left=0.01, right=1.)
return ee.meta['ZP_'+obsmode], ee_interp
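# Hypothetical example of applying the single-aperture correction returned
# above: the total flux would be estimated by dividing the measured aperture
# flux by the interpolated encircled-energy fraction.
#
#   >>> zp, ee_frac = get_hst_aperture_correction('f160w', raper=0.35)
#   >>> # flux_total = flux_aper / ee_frac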
def get_kron_tot_corr(tab, filter, inst=None, pixel_scale=0.06, photplam=None, rmax=5.0):
"""
Compute total correction from tabulated EE curves
"""
ee = utils.read_catalog((os.path.join(os.path.dirname(__file__),
'data', 'hst_encircled_energy.fits')))
obsmode = utils.get_filter_obsmode(filter=filter, acs_chip='wfc1',
uvis_chip='uvis2', aper=np.inf,
case=str.lower)
min_kron = float(np.atleast_1d(tab.meta['MINKRON'])[0])
if pixel_scale is None:
try:
pixel_scale = tab.meta['ASEC_0']/tab.meta['APER_0']
except:
pixel_scale = tab.meta['asec_0'][0]/tab.meta['aper_0'][0]
if 'kron_rcirc' in tab.colnames:
kron_raper = np.clip(tab['kron_rcirc']*pixel_scale,
min_kron*pixel_scale, rmax)
else:
kron_raper = np.clip(tab['KRON_RCIRC']*pixel_scale,
min_kron*pixel_scale, rmax)
# Filter not available
if obsmode not in ee.colnames:
return kron_raper*0.+1
else:
ee_rad = np.append(ee['radius'], rmax)
ee_y = np.append(ee[obsmode], 1.)
ee_interp = np.interp(kron_raper, ee_rad, ee_y, left=0.01, right=1.)
return 1./ee_interp
def get_wfc3ir_kron_tot_corr(tab, filter, pixel_scale=0.06, photplam=None, rmax=5.):
"""
Compute total correction from WFC3/IR EE curves
.. note::
Deprecated, use `~grizli.utils.get_kron_tot_corr`.
"""
ee_raw = np.loadtxt((os.path.join(os.path.dirname(__file__),
'data', 'wfc3ir_ee.txt')))
ee_data = ee_raw[1:, 1:]
ee_wave = ee_raw[0, 1:]
ee_rad = ee_raw[1:, 0]
kron_raper = np.clip(tab['kron_rcirc']*pixel_scale,
tab.meta['MINKRON'][0]*pixel_scale, rmax)
if (filter.lower()[:2] not in ['f0', 'f1']) & (photplam is None):
return kron_raper*0.+1
if photplam is None:
wum = int(filter[1:-1])*100/1.e4
else:
wum = photplam/1.e4
if wum < 0.9:
return kron_raper*0.+1
xi = np.interp(wum, ee_wave, np.arange(len(ee_wave)))
i0 = int(xi)
fi = 1-(xi-i0)
ee_y = ee_data[:, i0:i0+2].dot([fi, 1-fi])
ee_rad = np.append(ee_rad, rmax)
ee_y = np.append(ee_y, 1.)
ee_interp = np.interp(kron_raper, ee_rad, ee_y, left=0.01, right=1.)
return 1./ee_interp
def make_drz_catalog(root='', sexpath='sex', threshold=2., get_background=True,
verbose=True, extra_config={}, sci=None, wht=None,
get_sew=False, output_params=SEXTRACTOR_DEFAULT_PARAMS,
phot_apertures=SEXTRACTOR_PHOT_APERTURES,
column_case=str.upper):
"""Make a SExtractor catalog from drizzle products
.. note::
Deprecated, use `~grizli.utils.make_SEP_catalog`.
"""
import copy
import sewpy
if sci is not None:
drz_file = sci
else:
drz_file = glob.glob('{0}_dr[zc]_sci.fits'.format(root))[0]
im = pyfits.open(drz_file)
if 'PHOTFNU' in im[0].header:
ZP = -2.5*np.log10(im[0].header['PHOTFNU'])+8.90
elif 'PHOTFLAM' in im[0].header:
ZP = (-2.5*np.log10(im[0].header['PHOTFLAM']) - 21.10 -
5*np.log10(im[0].header['PHOTPLAM']) + 18.6921)
elif 'FILTER' in im[0].header:
fi = im[0].header['FILTER'].upper()
if fi in model.photflam_list:
ZP = (-2.5*np.log10(model.photflam_list[fi]) - 21.10 -
5*np.log10(model.photplam_list[fi]) + 18.6921)
else:
print('Couldn\'t find PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25')
ZP = 25
else:
print('Couldn\'t find FILTER, PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25')
ZP = 25
if verbose:
print('Image AB zeropoint: {0:.3f}'.format(ZP))
weight_file = drz_file.replace('_sci.fits', '_wht.fits').replace('_drz.fits', '_wht.fits')
if (weight_file == drz_file) | (not os.path.exists(weight_file)):
WEIGHT_TYPE = "NONE"
else:
WEIGHT_TYPE = "MAP_WEIGHT"
if wht is not None:
weight_file = wht
config = OrderedDict(DETECT_THRESH=threshold, ANALYSIS_THRESH=threshold,
DETECT_MINAREA=6,
PHOT_FLUXFRAC="0.5",
WEIGHT_TYPE=WEIGHT_TYPE,
WEIGHT_IMAGE=weight_file,
CHECKIMAGE_TYPE="SEGMENTATION",
CHECKIMAGE_NAME='{0}_seg.fits'.format(root),
MAG_ZEROPOINT=ZP,
CLEAN="N",
PHOT_APERTURES=phot_apertures,
BACK_SIZE=32,
PIXEL_SCALE=0,
MEMORY_OBJSTACK=30000,
MEMORY_PIXSTACK=3000000,
MEMORY_BUFSIZE=8192)
if get_background:
config['CHECKIMAGE_TYPE'] = 'SEGMENTATION,BACKGROUND'
config['CHECKIMAGE_NAME'] = '{0}_seg.fits,{0}_bkg.fits'.format(root)
else:
config['BACK_TYPE'] = 'MANUAL'
config['BACK_VALUE'] = 0.
for key in extra_config:
config[key] = extra_config[key]
params = copy.copy(output_params)
NAPER = len(phot_apertures.split(','))
if NAPER == 1:
if not phot_apertures.split(',')[0]:
NAPER = 0
if NAPER > 0:
params.extend(['FLUX_APER({0})'.format(NAPER),
'FLUXERR_APER({0})'.format(NAPER)])
# if NAPER > 1:
# for i in range(NAPER-1):
# params.extend(['FLUX_APER{0}'.format(i+1),
# 'FLUXERR_APER{0}'.format(i+1)])
sew = sewpy.SEW(params=params, config=config)
if get_sew:
return sew
output = sew(drz_file)
cat = output['table']
cat.meta = config
for c in cat.colnames:
cat.rename_column(c, column_case(c))
cat.write('{0}.cat'.format(root), format='ascii.commented_header',
overwrite=True)
logstr = '# DRZ {0} catalog: {1:d} objects'.format(root, len(cat))
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
return cat
# bin widths defined in pixels with scale `pixel_scale`
BLOT_BACKGROUND_PARAMS = {'bw': 64, 'bh': 64, 'fw': 3, 'fh': 3,
'pixel_scale': 0.06}
def blot_background(visit={'product': '', 'files': None},
bkg_params=BLOT_BACKGROUND_PARAMS,
verbose=True, skip_existing=True, get_median=False,
log=True, stepsize=10, **kwargs):
"""
Blot SEP background of drizzled image back to component FLT images
Parameters
----------
visit : dict
Dictionary defining the drizzle product ('product' key) and
associated FLT files that contribute to the drizzled mosaic ('files'
list)
bkg_params : dict
Parameters for `sep.Background`
verbose : bool
Status messages
skip_existing : bool
Don't run if ``BLOTSKY`` keyword found in the FLT header
get_median : bool
Don't use full background but rather just use (masked) median value
of the drizzled mosaic
log : bool
Write log information to `grizli.utils.LOGFILE`
stepsize : int
Parameter for `blot`
Returns
-------
Nothing returned but subtracts the transformed background image directly
from the FLT files and updates header keywords
"""
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.blot_background')
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from drizzlepac import astrodrizzle
drz_files = glob.glob('{0}_dr[zc]_sci.fits'.format(visit['product']))
if len(drz_files) == 0:
logstr = '# blot_background: No mosaic found {0}_dr[zc]_sci.fits'
logstr = logstr.format(visit['product'])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
return False
drz_file = drz_files[0]
drz_im = pyfits.open(drz_file)
drz_unit = drz_im[0].header['BUNIT']
drz_wcs = pywcs.WCS(drz_im[0].header)
drz_wcs.pscale = utils.get_wcs_pscale(drz_wcs)
# Get SEP background
bkg_data = make_SEP_catalog(root=visit['product'], bkg_only=True,
bkg_params=bkg_params, verbose=False)
if get_median:
mask = drz_im[0].data != 0
        bkg_data = bkg_data*0. + np.median(bkg_data[mask])
logstr = '# Blot background from {0}'.format(drz_file)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
for file in visit['files']:
flt = pyfits.open(file, mode='update')
for ext in range(1, 5):
if ('SCI', ext) not in flt:
continue
if ('BLOTSKY' in flt['SCI', ext].header) & (skip_existing):
print(f'\'BLOTSKY\' keyword found in {file}. Skipping....')
continue
logstr = '# Blot background: {0}[SCI,{1}]'.format(file, ext)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
flt_wcs = pywcs.WCS(flt['SCI', ext].header, fobj=flt, relax=True)
flt_wcs.pscale = utils.get_wcs_pscale(flt_wcs)
blotted = utils.blot_nearest_exact(bkg_data.astype(np.float32),
drz_wcs, flt_wcs,
stepsize=stepsize,
scale_by_pixel_area=True)
flt_unit = flt['SCI', ext].header['BUNIT']
if flt_unit+'/S' == drz_unit:
tscale = flt[0].header['EXPTIME']
elif flt_unit == drz_unit + '/S':
tscale = 1./flt[0].header['EXPTIME']
else:
tscale = 1.
flt['SCI', ext].data -= blotted*tscale
flt['SCI', ext].header['BLOTSKY'] = (True,
'Sky blotted from SKYIMAGE')
flt['SCI', ext].header['SKYIMAGE'] = (drz_file,
'Source image for sky')
flt['SCI', ext].header['SKYBW'] = (bkg_params['bw'],
'Sky bkg_params')
flt['SCI', ext].header['SKYBH'] = (bkg_params['bh'],
'Sky bkg_params')
flt['SCI', ext].header['SKYFW'] = (bkg_params['fw'],
'Sky bkg_params')
flt['SCI', ext].header['SKYFH'] = (bkg_params['fh'],
'Sky bkg_params')
flt['SCI', ext].header['SKYPIX'] = (bkg_params['pixel_scale'],
'Sky bkg_params, pixel_scale')
flt.flush()
return True
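# Hypothetical usage sketch for `blot_background`; assumes the drizzled
# '{product}_dr[zc]_sci.fits' mosaic exists and the file names are
# illustrative.
#
#   >>> visit = {'product': 'example-f140w', 'files': ['iexample1q_flt.fits']}
#   >>> blot_background(visit=visit, bkg_params=BLOT_BACKGROUND_PARAMS,
#   ...                 verbose=True, skip_existing=True)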
def separate_chip_sky(visit, filters=['F200LP','F350LP','F600LP','F390W'], stepsize=10, statistic=np.nanmedian, by_amp=True, only_flc=True, row_average=True, average_order=11, seg_dilate=16, **kwargs):
"""
Get separate background values for each chip. Updates 'CHIPSKY' header
keyword for each SCI extension of the visit exposure files.
Parameters
----------
visit : dict
List of visit information from `~grizli.utils.parse_flt_files`.
filters : list
Only run if the exposures in `visit['files']` use a filter in this
list, e.g., less-common WFC3/UVIS filters
stepsize : int
Parameter for blot
statistic : func
Test statistic on (masked) image data from each extension
by_amp : bool
Compute stats by amp (half-chip) subregions (*not implemented*)
only_flc : True
Only run if `visit['files'][0]` has "flc" extension
Returns
-------
status : bool
Runtime status, True if executed
"""
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.separate_chip_sky')
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from drizzlepac import astrodrizzle
import scipy.ndimage as nd
if ('flc' not in visit['files'][0]) & only_flc:
return False
flt = pyfits.open(visit['files'][0])
filt_i = utils.get_hst_filter(flt[0].header)
if filt_i not in filters:
logstr = f'# separate_chip_sky: {filt_i} not in {filters} for '
logstr += "'{0}'".format(visit['product'])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
return False
seg_files = glob.glob('{0}_seg.fits*'.format(visit['product']))
if len(seg_files) == 0:
logstr = '# separate_chip_sky: No segimage found {0}_seg.fits'
logstr = logstr.format(visit['product'])
        utils.log_comment(utils.LOGFILE, logstr, verbose=True)
        return False
seg_file = seg_files[0]
seg_im = pyfits.open(seg_file)
seg_mask = nd.binary_dilation(seg_im[0].data > 0, iterations=seg_dilate)*1
seg_wcs = pywcs.WCS(seg_im[0].header)
seg_wcs.pscale = utils.get_wcs_pscale(seg_wcs)
if row_average:
row_num = {}
row_den = {}
make_fig = True
if make_fig:
fig, axes = plt.subplots(2,1,figsize=(8,8))
for file in visit['files']:
flt = pyfits.open(file, mode='update')
for ext in range(1, 5):
if ('SCI', ext) not in flt:
continue
flt_wcs = pywcs.WCS(flt['SCI', ext].header, fobj=flt, relax=True)
flt_wcs.pscale = utils.get_wcs_pscale(flt_wcs)
blotted = utils.blot_nearest_exact(seg_mask,
seg_wcs, flt_wcs,
stepsize=stepsize,
scale_by_pixel_area=False)
nonzero = flt['SCI',ext].data != 0
ok = (flt['DQ',ext].data == 0) & (blotted <= 0)
ok &= nonzero
stat = statistic(flt['SCI',ext].data[ok])
if row_average:
print(file, ext, stat)
wht = 1/(flt['ERR',ext].data)**2
wht[~ok] = 0
filled = (flt['SCI',ext].data - stat)/stat
filled[~(ok & nonzero)] = np.nan
rows = np.nanmedian(filled, axis=1)
rows[~np.isfinite(rows)] = 0
if ext not in row_num:
row_num[ext] = rows
row_den[ext] = (rows != 0)*1
else:
row_num[ext] += rows
row_den[ext] += (rows != 0)*1
if make_fig:
axes[ext-1].plot(rows, alpha=0.5)
fig.tight_layout(pad=0.5)
fig.savefig('/tmp/rows.png')
###############
if 'MDRIZSKY' in flt['SCI',ext].header:
stat -= flt['SCI',ext].header['MDRIZSKY']
logstr = f'# separate_chip_sky {filt_i}: '
logstr += f'{file}[SCI,{ext}] = {stat:6.2f}'
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
if 'CHIPSKY' in flt['SCI',ext].header:
flt['SCI',ext].header['CHIPSKY'] += stat
else:
flt['SCI',ext].header['CHIPSKY'] = (stat, 'Chip-level sky')
flt['SCI',ext].data -= nonzero*stat
flt.flush()
if row_average:
row_avg = {}
row_model = {}
for ext in row_num:
row_avg[ext] = row_num[ext]/row_den[ext]
row_avg[ext][row_den[ext] <= 0] = np.nan
if make_fig:
axes[ext-1].plot(row_avg[ext], alpha=0.5, color='k')
fig.tight_layout(pad=0.5)
msk = np.isfinite(row_avg[ext])
xi = np.linspace(0,1,row_avg[ext].size)
#deg = 11
for _iter in range(5):
cc = np.polynomial.chebyshev.chebfit(xi[msk],
row_avg[ext][msk],
average_order,
rcond=None,
full=False, w=None)
row_model[ext] = np.polynomial.chebyshev.chebval(xi, cc)
msk = np.isfinite(row_avg[ext])
msk &= np.abs(row_avg[ext] - row_model[ext]) < 3*utils.nmad(row_avg[ext][msk])
if make_fig:
axes[ext-1].plot(row_model[ext], color='r')
fig.savefig('/tmp/rows.png')
for file in visit['files']:
flt = pyfits.open(file, mode='update')
for ext in range(1, 5):
if ('SCI', ext) not in flt:
continue
nonzero = flt['SCI',ext].data != 0
stat = flt['SCI',ext].header['CHIPSKY']*1
if 'MDRIZSKY' in flt['SCI',ext].header:
stat += flt['SCI',ext].header['MDRIZSKY']
row_avg_ext = nonzero * row_avg[ext][:,None] * stat
flt['SCI',ext].data -= row_avg_ext
flt['SCI',ext].header['ROWSKY'] = (True,
'Row-averaged sky removed')
flt.flush()
return True
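# Hypothetical usage sketch for `separate_chip_sky`; assumes the visit mosaic
# and its '{product}_seg.fits' segmentation image (e.g., from
# `make_SEP_catalog`) already exist, and the file names are illustrative.
#
#   >>> visit = {'product': 'example-f350lp', 'files': ['iexample1q_flc.fits']}
#   >>> separate_chip_sky(visit, filters=['F350LP'], row_average=False)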
def add_external_sources(root='', maglim=20, fwhm=0.2, catalog='2mass'):
"""Add Gaussian sources in empty parts of an image derived from an external catalog
Parameters
----------
    root : str
        Rootname of the drizzled mosaic; finds ``{root}_dr[zc]_sci.fits``
        and the associated ``_wht.fits`` weight image
    maglim : float
        Faint AB magnitude limit for sources added from the external catalog
    fwhm : float
        FWHM of the inserted Gaussian sources, arcsec
    catalog : str
        External catalog to query: '2mass', 'panstarrs', 'ukidss' or 'gaia'
    Returns
    -------
    Nothing returned; writes new science and weight images with the catalog
    name inserted into the filenames
"""
from astropy.modeling import models
sci_file = glob.glob('{0}_dr[zc]_sci.fits'.format(root))[0]
wht_file = glob.glob('{0}_dr[zc]_wht.fits'.format(root))[0]
sci = pyfits.open(sci_file)
wht = pyfits.open(wht_file)
sh = sci[0].data.shape
yp, xp = np.indices(sh)
PHOTPLAM = sci[0].header['PHOTPLAM']
PHOTFLAM = sci[0].header['PHOTFLAM']
ZP = -2.5*np.log10(PHOTFLAM) - 21.10 - 5*np.log10(PHOTPLAM) + 18.6921
wcs = pywcs.WCS(sci[0])
pscale = utils.get_wcs_pscale(wcs)
rd = wcs.all_pix2world(np.array([[sh[1]/2], [sh[0]/2]]).T, 0)[0]
radius = np.sqrt(2)*np.maximum(sh[0], sh[1])/2.*pscale/60.
if catalog == '2mass':
cat = get_irsa_catalog(rd[0], rd[1], radius=radius, twomass=True)
cat['mag'] = cat['h_m']+1.362 # AB
table_to_regions(cat, '{0}_2mass.reg'.format(root))
elif catalog == 'panstarrs':
cat = get_panstarrs_catalog(rd[0], rd[1], radius=radius)
# cat['mag'] = cat['rMeanKronMag']+0.14 # AB
cat['mag'] = cat['iMeanKronMag']+0.35 # AB
table_to_regions(cat, '{0}_panstarrs.reg'.format(root))
elif catalog == 'ukidss':
cat = get_ukidss_catalog(rd[0], rd[1], radius=radius)
cat['mag'] = cat['HAperMag3']+1.362 # AB
cat.rename_column('RA', 'ra')
cat.rename_column('Dec', 'dec')
table_to_regions(cat, '{0}_ukidss.reg'.format(root))
elif catalog == 'gaia':
cat = get_gaia_DR2_vizier(rd[0], rd[1], radius=radius)
cat['mag'] = np.minimum(cat['phot_g_mean_mag'], 19)-2
table_to_regions(cat, '{0}_gaia.reg'.format(root))
else:
print('Not a valid catalog: ', catalog)
return False
cat = cat[(cat['mag'] < maglim) & (cat['mag'] > 0)]
print('{0}: {1} objects'.format(catalog, len(cat)))
if len(cat) == 0:
return False
xy = wcs.all_world2pix(cat['ra'], cat['dec'], 0)
flux = sci[0].data*0.
N = len(cat)
for i in range(N):
print('Add object {0:3d}/{1:3d}, x={2:6.1f}, y={3:6.1f}, mag={4:6.2f}'.format(i, N, xy[0][i], xy[1][i], cat['mag'][i]))
scale = 10**(-0.4*(cat['mag'][i]-ZP))
src = models.Gaussian2D(amplitude=scale, x_mean=xy[0][i], y_mean=xy[1][i], x_stddev=fwhm/pscale/2.35, y_stddev=fwhm/pscale/2.35, theta=0.0)
m_i = src(xp, yp)
flux += m_i
# ds9.view(flux)
clip = (wht[0].data == 0) & (flux > 1.e-6*flux.max())
wht_val = np.percentile(wht[0].data, 95)
wht[0].data[clip] = wht_val
sci[0].data[clip] = flux[clip]
sci.writeto(sci_file.replace('_drz', '_{0}_drz'.format(catalog)),
overwrite=True)
wht.writeto(wht_file.replace('_drz', '_{0}_drz'.format(catalog)),
overwrite=True)
def asn_to_dict(input_asn):
"""Convert an ASN file to a dictionary
Parameters
----------
input_asn : str
Filename of the ASN table
Returns
-------
output : dict
Dictionary with keys 'product' and 'files'.
"""
from stsci.tools import asnutil
# Already is a dict
if isinstance(input_asn, dict):
return input_asn
# String / unicode
if hasattr(input_asn, 'upper'):
asn = asnutil.readASNTable(input_asn)
else:
# asnutil.ASNTable
asn = input_asn
output = {'product': asn['output'],
'files': asn['order']}
return output
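# Example sketch: `asn_to_dict` passes dictionaries through unchanged, so both
# of the following would be valid inputs (file names are hypothetical):
#
#   >>> asn_to_dict({'product': 'example-f140w',
#   ...              'files': ['iexample1q_flt.fits']})
#   >>> asn_to_dict('example-f140w_asn.fits')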
# Visit-level background subtraction parameters for blot_background
BKG_PARAMS = {'bw': 128, 'bh': 128, 'fw': 3, 'fh': 3, 'pixel_scale': 0.06}
SEPARATE_CHIP_KWARGS = {'filters': ['F200LP','F350LP','F600LP','F390W'],
'stepsize': 10,
'statistic': np.median,
'by_amp':True,
'only_flc':True}
def process_direct_grism_visit(direct={},
grism={},
radec=None,
outlier_threshold=5,
align_clip=30,
align_thresh=None,
align_mag_limits=[14, 23, 0.05],
align_rms_limit=2,
align_triangle_ba_max=0.9,
align_ref_border=100,
align_min_flux_radius=1.,
max_err_percentile=99,
catalog_mask_pad=0.05,
match_catalog_density=None,
column_average=True,
sky_iter=10,
run_tweak_align=True,
tweak_fit_order=-1,
skip_direct=False,
fix_stars=True,
tweak_max_dist=100.,
tweak_n_min=10,
tweak_threshold=1.5,
align_simple=True,
single_image_CRs=True,
drizzle_params={},
iter_atol=1.e-4,
imaging_bkg_params=None,
run_separate_chip_sky=True,
separate_chip_kwargs={},
reference_catalogs=['GAIA', 'PS1',
'SDSS', 'WISE'],
use_self_catalog=False):
"""Full processing of a direct (+grism) image visit.
Notes
-----
For **imaging** exposures:
1) Copies of individual exposures with `~grizli.prep.fresh_flt_file`
* Run `stwcs.updatewcs.updatewcs` on each FLT
#) "tweak" shift alignment of individual FLTs
* If ACS or UVIS, do preliminary `AstroDrizzle` run to flag CRs
#) Run `AstroDrizzle` to create first-pass mosaic
#) Astrometric alignment of the drizzled image reference catalogs with
`~grizli.prep.align_drizzled_image`
* Propagate alignment back to FLT exposures
#) Redrizzle visit mosaic with updated astrometry
#) *optional* Subtract mosaic background from exposures with
`~grizli.prep.blot_background`
#) Make final visit catalog
#) *optional* Fill saturated stars with ePSF models with
`~grizli.prep.fix_star_centers`
For **grism** exposures:
If *grism* exposures are specified, first do the above for the direct
images and then,
1) Assign (refined) WCS of associated direct image to each grism exposure
(`~grizli.prep.match_direct_grism_wcs`)
#) Run `AstroDrizzle` to flag additional CRs, bad pixels
#) Subtract 2D sky background (`~grizli.prep.visit_grism_sky`)
* *optional* additional column-average grism background
#) Redrizzle grism mosaic
"""
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.process_direct_grism_visit')
#from stsci.tools import asnutil
from stwcs import updatewcs
from drizzlepac import updatehdr
from drizzlepac.astrodrizzle import AstroDrizzle
#################
# Direct image processing
#################
# Copy FLT files from ../RAW
isACS = '_flc' in direct['files'][0] # Also UVIS
isWFPC2 = '_c0' in direct['files'][0]
if not skip_direct:
for file in direct['files']:
crclean = isACS & (len(direct['files']) == 1)
fresh_flt_file(file, crclean=crclean)
try:
updatewcs.updatewcs(file, verbose=False, use_db=False)
except:
updatewcs.updatewcs(file, verbose=False)
# ### Make ASN
# if not isWFPC2:
# asn = asnutil.ASNTable(inlist=direct['files'], output=direct['product'])
# asn.create()
# asn.write()
# Initial grism processing
skip_grism = (grism == {}) | (grism is None) | (len(grism) == 0)
if not skip_grism:
for file in grism['files']:
fresh_flt_file(file)
# Need to force F814W filter for updatewcs
if isACS:
flc = pyfits.open(file, mode='update')
if flc[0].header['INSTRUME'] == 'ACS':
changed_filter = True
flc[0].header['FILTER1'] = 'CLEAR1L'
flc[0].header['FILTER2'] = 'F814W'
flc.flush()
flc.close()
else:
changed_filter = False
flc.close()
else:
changed_filter = False
# Run updatewcs
try:
updatewcs.updatewcs(file, verbose=False, use_db=False)
except:
updatewcs.updatewcs(file, verbose=False)
# Change back
if changed_filter:
flc = pyfits.open(file, mode='update')
flc[0].header['FILTER1'] = 'CLEAR2L'
flc[0].header['FILTER2'] = 'G800L'
flc.flush()
flc.close()
# Make ASN
# asn = asnutil.ASNTable(grism['files'], output=grism['product'])
# asn.create()
# asn.write()
if isACS:
bits = 64+32+256
driz_cr_snr = '3.5 3.0'
driz_cr_scale = '1.2 0.7'
elif isWFPC2:
bits = 64+32
driz_cr_snr = '3.5 3.0'
driz_cr_scale = '1.2 0.7'
else:
bits = 576+256
driz_cr_snr = '8.0 5.0'
driz_cr_scale = '2.5 0.7'
if 'driz_cr_scale' in drizzle_params:
driz_cr_scale = drizzle_params['driz_cr_scale']
drizzle_params.pop('driz_cr_scale')
if 'driz_cr_snr' in drizzle_params:
driz_cr_snr = drizzle_params['driz_cr_snr']
drizzle_params.pop('driz_cr_snr')
if 'bits' in drizzle_params:
bits = drizzle_params['bits']
drizzle_params.pop('bits')
# Relax CR rejection for first-pass ACS
if isACS:
driz_cr_snr_first = '15. 10.0'
driz_cr_scale_first = '1.2 0.7'
else:
driz_cr_snr_first = driz_cr_snr
driz_cr_scale_first = driz_cr_scale
if not skip_direct:
if (not isACS) & (not isWFPC2) & run_tweak_align:
# if run_tweak_align:
tweak_align(direct_group=direct, grism_group=grism,
max_dist=tweak_max_dist, n_min=tweak_n_min,
key=' ', drizzle=False,
threshold=tweak_threshold, fit_order=tweak_fit_order)
if (isACS) & (len(direct['files']) == 1) & single_image_CRs:
find_single_image_CRs(direct, simple_mask=False, with_ctx_mask=False, run_lacosmic=True)
# Get reference astrometry from GAIA, PS1, SDSS, WISE, etc.
if radec is None:
im = pyfits.open(direct['files'][0])
radec, ref_catalog = get_radec_catalog(ra=im[1].header['CRVAL1'],
dec=im[1].header['CRVAL2'],
product=direct['product'],
reference_catalogs=reference_catalogs,
date=im[0].header['EXPSTART'],
date_format='mjd',
use_self_catalog=use_self_catalog)
if ref_catalog == 'VISIT':
align_mag_limits = [16, 23, 0.05]
elif ref_catalog == 'SDSS':
align_mag_limits = [16, 21, 0.05]
elif ref_catalog == 'PS1':
align_mag_limits = [16, 23, 0.05]
elif ref_catalog == 'WISE':
align_mag_limits = [15, 20, 0.05]
else:
ref_catalog = 'USER'
logstr = '# {0}: First Drizzle'.format(direct['product'])
utils.log_comment(utils.LOGFILE, logstr, verbose=True, show_date=True)
# Clean up
for ext in ['.fits', '.log']:
file = '{0}_wcs.{1}'.format(direct['product'], ext)
if os.path.exists(file):
os.remove(file)
# First drizzle
if len(direct['files']) > 1:
AstroDrizzle(direct['files'], output=direct['product'],
clean=True, context=False, preserve=False,
skysub=True, driz_separate=True, driz_sep_wcs=True,
median=True, blot=True, driz_cr=True,
driz_cr_snr=driz_cr_snr_first,
driz_cr_scale=driz_cr_scale_first,
driz_cr_corr=False, driz_combine=True,
final_bits=bits, coeffs=True, build=False,
final_wht_type='IVM', **drizzle_params)
else:
AstroDrizzle(direct['files'], output=direct['product'],
clean=True, final_scale=None, final_pixfrac=1,
context=False, final_bits=bits, preserve=False,
driz_separate=False, driz_sep_wcs=False,
median=False, blot=False, driz_cr=False,
driz_cr_corr=False, driz_combine=True,
build=False, final_wht_type='IVM', **drizzle_params)
# Now do tweak_align for ACS
if (isACS) & run_tweak_align & (len(direct['files']) > 1):
tweak_align(direct_group=direct, grism_group=grism,
max_dist=tweak_max_dist, n_min=tweak_n_min,
key=' ', drizzle=False,
threshold=tweak_threshold)
# Redrizzle with no CR rejection
AstroDrizzle(direct['files'], output=direct['product'],
clean=True, context=False, preserve=False,
skysub=False, driz_separate=False,
driz_sep_wcs=False,
median=False, blot=False, driz_cr=False,
driz_cr_corr=False, driz_combine=True,
final_bits=bits, coeffs=True, build=False,
final_wht_type='IVM', resetbits=0)
# Make catalog & segmentation image
if align_thresh is None:
if isWFPC2:
thresh = 8
else:
thresh = 2
else:
thresh = align_thresh
#cat = make_drz_catalog(root=direct['product'], threshold=thresh)
cat = make_SEP_catalog(root=direct['product'], threshold=thresh)
if radec == 'self':
okmag = ((cat['MAG_AUTO'] > align_mag_limits[0]) &
(cat['MAG_AUTO'] < align_mag_limits[1]))
cat['X_WORLD', 'Y_WORLD'][okmag].write('self',
format='ascii.commented_header',
overwrite=True)
# clip=30
logfile = '{0}_wcs.log'.format(direct['product'])
if os.path.exists(logfile):
os.remove(logfile)
guess_file = '{0}.align_guess'.format(direct['product'])
if os.path.exists(guess_file):
guess = np.loadtxt(guess_file)
else:
guess = [0., 0., 0., 1]
try:
result = align_drizzled_image(root=direct['product'],
mag_limits=align_mag_limits,
radec=radec, NITER=3, clip=align_clip,
log=True, guess=guess,
outlier_threshold=outlier_threshold,
simple=align_simple,
rms_limit=align_rms_limit,
max_err_percentile=max_err_percentile,
catalog_mask_pad=catalog_mask_pad,
triangle_size_limit=[5, 2400*(1+isACS)],
triangle_ba_max=align_triangle_ba_max,
match_catalog_density=match_catalog_density,
ref_border=align_ref_border,
min_flux_radius=align_min_flux_radius)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! Drizzle alignment failed")
fp = open('{0}.wcs_failed'.format(direct['product']), 'w')
fp.write(guess.__str__())
fp.close()
# Does nothing but moves forward
result = align_drizzled_image(root=direct['product'],
mag_limits=align_mag_limits,
radec=radec, NITER=0, clip=align_clip,
log=False, guess=guess,
outlier_threshold=outlier_threshold,
simple=align_simple,
rms_limit=align_rms_limit,
max_err_percentile=max_err_percentile,
catalog_mask_pad=catalog_mask_pad,
match_catalog_density=match_catalog_density,
ref_border=align_ref_border,
min_flux_radius=align_min_flux_radius)
orig_wcs, drz_wcs, out_shift, out_rot, out_scale = result
# Update direct FLT WCS
for file in direct['files']:
xyscale = [out_shift[0], out_shift[1], out_rot, out_scale]
update_wcs_fits_log(file, orig_wcs,
xyscale=xyscale,
initialize=False,
replace=('.fits', '.wcslog.fits'),
wcsname=ref_catalog)
updatehdr.updatewcs_with_shift(file,
str('{0}_wcs.fits'.format(direct['product'])),
xsh=out_shift[0], ysh=out_shift[1],
rot=out_rot, scale=out_scale,
wcsname=ref_catalog, force=True,
reusename=True, verbose=True,
sciext='SCI')
# Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
# keywords
im = pyfits.open(file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
# Second drizzle with aligned wcs, refined CR-rejection params
# tuned for WFC3/IR
logstr = '# {0}: Second Drizzle'.format(direct['product'])
utils.log_comment(utils.LOGFILE, logstr, verbose=True, show_date=True)
if len(direct['files']) == 1:
AstroDrizzle(direct['files'], output=direct['product'],
clean=True, final_pixfrac=0.8, context=False,
resetbits=4096, final_bits=bits, driz_sep_bits=bits,
preserve=False, driz_cr_snr=driz_cr_snr,
driz_cr_scale=driz_cr_scale, driz_separate=False,
driz_sep_wcs=False, median=False, blot=False,
driz_cr=False, driz_cr_corr=False,
build=False, final_wht_type='IVM', **drizzle_params)
else:
if 'par' in direct['product']:
pixfrac = 1.0
else:
pixfrac = 0.8
AstroDrizzle(direct['files'], output=direct['product'],
clean=True, final_pixfrac=pixfrac,
context=(isACS | isWFPC2),
resetbits=4096, final_bits=bits, driz_sep_bits=bits,
preserve=False, driz_cr_snr=driz_cr_snr,
driz_cr_scale=driz_cr_scale, build=False,
final_wht_type='IVM', **drizzle_params)
# Flag areas of ACS images covered by a single image, where
# CRs aren't appropriately masked
is_single = (len(direct['files']) == 1)
if (single_image_CRs) & (isACS | isWFPC2):
logstr = '# Mask areas of the mosaic covered by a single input image'
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
try:
find_single_image_CRs(direct, simple_mask=(not is_single), with_ctx_mask=(not is_single), run_lacosmic=is_single)
except:
utils.log_exception(utils.LOGFILE, traceback)
pass
# Make DRZ catalog again with updated DRZWCS
clean_drizzle(direct['product'])
# Subtract visit-level background based on the drizzled mosaic
if imaging_bkg_params is not None:
logstr = '# Imaging background: {0}'.format(imaging_bkg_params)
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
bkg_params = imaging_bkg_params.copy()
if 'get_median' in bkg_params:
get_median = bkg_params.pop('get_median')
else:
get_median = False
blot_background(visit=direct, bkg_params=bkg_params,
verbose=True, skip_existing=True,
get_median=get_median)
if run_separate_chip_sky:
separate_chip_sky(direct, **separate_chip_kwargs)
# Chip-level background for some UVIS filters
# uvis_chip_background(visit=direct)
# Remake catalog
#cat = make_drz_catalog(root=direct['product'], threshold=thresh)
cat = make_SEP_catalog(root=direct['product'], threshold=thresh)
# 140 brightest or mag range
clip = (cat['MAG_AUTO'] > align_mag_limits[0]) & (cat['MAG_AUTO'] < align_mag_limits[1])
if len(align_mag_limits) > 2:
clip &= cat['MAGERR_AUTO'] < align_mag_limits[2]
else:
clip &= cat['MAGERR_AUTO'] < 0.05
clip &= utils.catalog_mask(cat, max_err_percentile=max_err_percentile,
pad=catalog_mask_pad,
pad_is_absolute=False,
min_flux_radius=align_min_flux_radius)
NMAX = 140
so = np.argsort(cat['MAG_AUTO'][clip])
if clip.sum() > NMAX:
so = so[:NMAX]
table_to_regions(cat[clip][so], '{0}.cat.reg'.format(direct['product']))
if not ((isACS | isWFPC2) & is_single):
table_to_radec(cat[clip][so], '{0}.cat.radec'.format(direct['product']))
if (fix_stars) & (not isACS) & (not isWFPC2):
fix_star_centers(root=direct['product'], drizzle=False,
mag_lim=19.5)
#################
# Grism image processing
#################
if skip_grism:
return True
# Match grism WCS to the direct images
match_direct_grism_wcs(direct=direct, grism=grism, get_fresh_flt=False)
# First drizzle to flag CRs
gris_cr_corr = len(grism['files']) > 1
AstroDrizzle(grism['files'], output=grism['product'], clean=True,
context=False, preserve=False, skysub=True,
driz_separate=gris_cr_corr, driz_sep_wcs=gris_cr_corr, median=gris_cr_corr,
blot=gris_cr_corr, driz_cr=gris_cr_corr, driz_cr_corr=gris_cr_corr,
driz_cr_snr=driz_cr_snr, driz_cr_scale=driz_cr_scale,
driz_combine=True, final_bits=bits, coeffs=True,
resetbits=4096, build=False, final_wht_type='IVM')
# Subtract grism sky
status = visit_grism_sky(grism=grism, apply=True, sky_iter=sky_iter,
column_average=column_average, verbose=True, ext=1,
iter_atol=iter_atol)
# Run on second chip (also for UVIS/G280)
if isACS:
visit_grism_sky(grism=grism, apply=True, sky_iter=sky_iter,
column_average=column_average, verbose=True, ext=2,
iter_atol=iter_atol)
# Add back in some pedestal or CR rejection fails for ACS
for file in grism['files']:
flt = pyfits.open(file, mode='update')
h = flt[0].header
flat_sky = h['GSKY101']*h['EXPTIME']
# Use same pedestal for both chips for skysub
for ext in [1, 2]:
flt['SCI', ext].data += flat_sky
flt.flush()
# Redrizzle with new background subtraction
if isACS:
skyfile = ''
else:
skyfile = '/tmp/{0}.skyfile'.format(grism['product'])
fp = open(skyfile, 'w')
fp.writelines(['{0} 0.0\n'.format(f) for f in grism['files']])
fp.close()
if 'par' in grism['product']:
pixfrac = 1.0
else:
pixfrac = 0.8
AstroDrizzle(grism['files'], output=grism['product'], clean=True,
context=isACS, preserve=False, skysub=True, skyfile=skyfile,
driz_separate=gris_cr_corr, driz_sep_wcs=gris_cr_corr, median=gris_cr_corr,
blot=gris_cr_corr, driz_cr=gris_cr_corr, driz_cr_corr=gris_cr_corr,
driz_cr_snr=driz_cr_snr, driz_cr_scale=driz_cr_scale,
driz_combine=True, driz_sep_bits=bits, final_bits=bits,
coeffs=True, resetbits=4096, final_pixfrac=pixfrac,
build=False, final_wht_type='IVM')
clean_drizzle(grism['product'])
# Add direct filter to grism FLT headers
set_grism_dfilter(direct, grism)
return True
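# Illustrative, stand-alone sketch (not part of grizli; all values synthetic)
# of the alignment-source selection applied above: keep catalog objects inside
# a magnitude range with small magnitude errors and cap the list at the
# brightest NMAX entries.
def _example_alignment_source_selection():
    import numpy as np
    rng = np.random.default_rng(3)
    mag = rng.uniform(15, 27, size=500)
    magerr = 10**(0.4*(mag - 27))        # brighter objects have smaller errors
    align_mag_limits = [14, 24]
    clip = (mag > align_mag_limits[0]) & (mag < align_mag_limits[1])
    clip &= magerr < 0.05
    NMAX = 140
    so = np.argsort(mag[clip])[:NMAX]
    return np.where(clip)[0][so]         # indices of the selected sources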
def set_grism_dfilter(direct, grism):
"""Set direct imaging filter for grism exposures
Parameters
----------
direct, grism : dict
Returns
-------
Nothing
"""
d_im = pyfits.open(direct['files'][0])
direct_filter = utils.get_hst_filter(d_im[0].header)
for file in grism['files']:
if '_flc' in file:
ext = [1, 2]
else:
ext = [1]
print('DFILTER: {0} {1}'.format(file, direct_filter))
flt = pyfits.open(file, mode='update')
for e in ext:
flt['SCI', e].header['DFILTER'] = (direct_filter,
'Direct imaging filter')
flt.flush()
def tweak_align(direct_group={}, grism_group={}, max_dist=1., n_min=10, key=' ', threshold=3, drizzle=False, fit_order=-1):
"""Intra-visit shift alignment
Parameters
----------
direct_group : dict
Visit info (`product`, `files`) for direct images
grism_group : dict
Visit info (`product`, `files`) for grism images
max_dist, threshold : float
Passed to `~grizli.prep.tweak_flt`
n_min : int
Minimum number of sources for a valid fit.
drizzle : bool
Run `AstroDrizzle` after performing the alignment
fit_order : int
If > 0, then fit a polynomial to the derived shifts rather than
using the shifts themselves, e.g., for DASH imaging
Returns
-------
Nothing, but updates WCS of direct and (optionally) grism exposures
"""
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.tweak_align')
from drizzlepac.astrodrizzle import AstroDrizzle
    from numpy import polyfit, polyval
if len(direct_group['files']) < 2:
logstr = '# ! {0}: Only one direct image found, can\'t compute shifts'
logstr = logstr.format(direct_group['product'])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
return True
wcs_ref, shift_dict = tweak_flt(files=direct_group['files'],
max_dist=max_dist, threshold=threshold,
verbose=True)
grism_matches = find_direct_grism_pairs(direct=direct_group, grism=grism_group, check_pixel=[507, 507], toler=0.1, key=key)
logstr = '\ngrism_matches = {0}\n'.format(grism_matches)
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
fp = open('{0}_shifts.log'.format(direct_group['product']), 'w')
fp.write('# flt xshift yshift rot scale N rmsx rmsy\n')
fp.write('# fit_order: {0}\n'.format(fit_order))
for k in grism_matches:
d = shift_dict[k]
fp.write('# match[\'{0}\'] = {1}\n'.format(k, grism_matches[k]))
for k in shift_dict:
d = shift_dict[k]
n_i = d[4]
if (n_i < n_min) | (np.abs(d[:2]).max() > max_dist):
fp.write('# ! {0:s} {1:7.3f} {2:7.3f} {3:8.5f} {4:8.5f} {5:5d} {6:6.3f} {7:6.3f}\n'.format(k, d[0], d[1], d[2], d[3], d[4], d[5][0], d[5][1]))
d[0] = d[1] = 0.
fp.write('{0:s} {1:7.3f} {2:7.3f} {3:8.5f} {4:8.5f} {5:5d} {6:6.3f} {7:6.3f}\n'.format(k, d[0], d[1], d[2], d[3], d[4], d[5][0], d[5][1]))
fp.close()
# Fit a polynomial, e.g., for DASH
if fit_order > 0:
logstr = '# {0}: Fit polynomial order={1} to shifts.'
logstr = logstr.format(direct_group['product'], fit_order)
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
shifts = np.array([shift_dict[k][:2] for k in sorted(shift_dict)])
t = np.arange(shifts.shape[0])
cx = polyfit(t, shifts[:, 0], fit_order)
sx = polyval(cx, t)
cy = polyfit(t, shifts[:, 1], fit_order)
sy = polyval(cy, t)
fit_shift = np.array([sx, sy]).T
for ik, k in enumerate(sorted(shift_dict)):
shift_dict[k][:2] = fit_shift[ik, :]
# Apply the shifts to the header WCS
apply_tweak_shifts(wcs_ref, shift_dict, grism_matches=grism_matches,
verbose=False)
if not drizzle:
return True
# Redrizzle
bits = 576
driz_cr_snr = '8.0 5.0'
driz_cr_scale = '2.5 0.7'
if 'par' in direct_group['product']:
pixfrac = 1.0
else:
pixfrac = 0.8
AstroDrizzle(direct_group['files'], output=direct_group['product'],
clean=True, final_pixfrac=pixfrac, context=False,
resetbits=4096, final_bits=bits, driz_sep_bits=bits,
preserve=False, driz_cr_snr=driz_cr_snr,
driz_cr_scale=driz_cr_scale, build=False,
final_wht_type='IVM')
clean_drizzle(direct_group['product'])
#cat = make_drz_catalog(root=direct_group['product'], threshold=1.6)
cat = make_SEP_catalog(root=direct_group['product'], threshold=1.6)
table_to_regions(cat, '{0}.cat.reg'.format(direct_group['product']))
if (grism_group == {}) | (grism_group is None):
return True
# Grism
skyfile = '/tmp/{0}.skyfile'.format(grism_group['product'])
fp = open(skyfile, 'w')
fp.writelines(['{0} 0.0\n'.format(f) for f in grism_group['files']])
fp.close()
AstroDrizzle(grism_group['files'], output=grism_group['product'],
clean=True, context=False, preserve=False, skysub=True,
skyfile=skyfile, driz_separate=True, driz_sep_wcs=True,
median=True, blot=True, driz_cr=True, driz_cr_corr=True,
driz_combine=True, driz_sep_bits=bits, final_bits=bits,
coeffs=True, resetbits=4096, final_pixfrac=pixfrac,
build=False, final_wht_type='IVM')
clean_drizzle(grism_group['product'])
return True
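# Minimal, self-contained sketch (illustrative only; shift values are made up)
# of the `fit_order > 0` smoothing used in `tweak_align`: the measured
# per-exposure shifts are replaced by a low-order polynomial evaluated at the
# exposure index, e.g. for DASH visits where the drift should be smooth.
def _example_polynomial_shift_smoothing():
    import numpy as np
    shifts = np.array([[0.00, 0.00], [0.21, -0.05], [0.43, -0.11],
                       [0.61, -0.14], [0.83, -0.22], [1.02, -0.26],
                       [1.24, -0.31], [1.41, -0.35]])
    t = np.arange(shifts.shape[0])
    fit_order = 2
    cx = np.polyfit(t, shifts[:, 0], fit_order)
    cy = np.polyfit(t, shifts[:, 1], fit_order)
    smoothed = np.array([np.polyval(cx, t), np.polyval(cy, t)]).T
    return smoothed                      # same shape as `shifts`, noise removed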
def drizzle_footprint(weight_image, shrink=10, ext=0, outfile=None, label=None):
"""
Footprint of image pixels where values > 0. Works best with drizzled
weight images.
(not used)
"""
from scipy.spatial import ConvexHull
im = pyfits.open(weight_image)
wcs = pywcs.WCS(im[ext].header, fobj=im)
sh = np.array(im[ext].data.shape)//shrink
yp, xp = np.indices(tuple(sh))*shrink
nonzero = im[ext].data[yp, xp] > 0
h = ConvexHull(np.array([xp[nonzero], yp[nonzero]]).T)
hx = xp[nonzero][h.vertices]
hy = yp[nonzero][h.vertices]
hrd = wcs.all_pix2world(np.stack([hx, hy]).T, 0)
pstr = 'polygon('+','.join(['{0:.6f}'.format(i) for i in hrd.flatten()])+')'
if label is not None:
pstr += ' # text={{{0}}}'.format(label)
if outfile is None:
return pstr
fp = open(outfile, 'w')
fp.write('fk5\n')
fp.write(pstr+'\n')
fp.close()
def clean_drizzle(root, context=False, fix_wcs_system=False):
"""Zero-out WHT=0 pixels in drizzle mosaics
Parameters
----------
root : str
Rootname of the mosaics. I.e., `{root}_drz_sci.fits`.
Returns
-------
Nothing, science mosaic modified in-place
"""
try:
drz_file = glob.glob('{0}_dr[zc]_sci.fits'.format(root))[0]
is_build = False
sci_ext = 0
except:
drz_file = glob.glob('{0}_dr[zc].fits'.format(root))[0]
sci_ext = 1
is_build = True
# Is result from build=True?
sci = pyfits.open(drz_file, mode='update')
if is_build:
mask = sci['WHT'].data == 0
else:
wht = pyfits.open(drz_file.replace('_sci.fits', '_wht.fits'))
mask = wht[0].data == 0
if fix_wcs_system:
# Force RADESYS/EQUINOX = ICRS/2000. and fix LATPOLE to CRVAL2
sci[sci_ext].header['RADESYS'] = 'ICRS'
sci[sci_ext].header['EQUINOX'] = 2000.0
sci[sci_ext].header['LATPOLE'] = sci[sci_ext].header['CRVAL2']
# Mask where context shows that mosaic comes from a single input
ctx_file = drz_file.replace('_sci.', '_ctx.')
if context & os.path.exists(ctx_file):
ctx = pyfits.open(ctx_file)
bits = np.log(ctx[0].data)/np.log(2)
        # bits == np.round(bits) only where the context value is a power of 2 (single input)
mask &= bits != np.round(bits)
sci[sci_ext].data[mask] = 0
# Rescale WFPC2 to ~WFC3 image zeropoint
if sci[0].header['INSTRUME'] == 'WFPC2':
#exptime = sci[0].header['EXPTIME']
scl = sci[0].header['PHOTFLAM'] / 1.5e-20
#sci[0].data /= exptime
sci[sci_ext].data *= scl
for k in ['PHOTFLAM', 'PHOTFNU']:
if k in sci[0].header:
sci[0].header[k] /= scl
if is_build:
sci['WHT'].data /= scl**2
else:
wht = pyfits.open(drz_file.replace('_sci.fits', '_wht.fits'),
mode='update')
wht[0].data /= scl**2
wht.flush()
sci.flush()
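# Stand-alone sketch (synthetic arrays, not grizli code) of the operation
# performed by `clean_drizzle`: science pixels are zeroed wherever the drizzled
# weight map is zero, so regions with no input exposures carry no flux.
def _example_zero_wht_pixels():
    import numpy as np
    sci = np.random.normal(size=(64, 64)).astype(np.float32)
    wht = np.ones_like(sci)
    wht[:, :10] = 0                      # e.g., a strip with no input exposures
    sci[wht == 0] = 0
    return sci, wht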
MATCH_KWS = dict(maxKeep=10, auto_keep=3, auto_transform=None, auto_limit=3,
size_limit=[5, 1800], ignore_rot=True, ignore_scale=True,
ba_max=0.9)
def tweak_flt(files=[], max_dist=0.4, threshold=3, verbose=True, tristars_kwargs=MATCH_KWS, use_sewpy=False):
"""Refine shifts of FLT files
Parameters
----------
files : list
List of flt filenames
max_dist : float
Maximum shift distance to allow
threshold : float
Source detection threshold for `sep.extract`
verbose : bool
Status messages
tristars_kwargs : dict
Keyword arguments for `tristars.match.match_catalog_tri`
use_sewpy : bool
Use `sewpy` for source detection (deprecated)
Returns
-------
ref_wcs : `~astropy.wcs.WCS`
Reference WCS (WCS of the first file in `files`)
shift_dict : dict
Shift dictionary with keys from `files` and values like
``[xshift, yshift, rot, scale, N, rms]``. Note that only shifts are
fit, so `rot = 0.` and `scale = 1.`. ``N`` is the number of sources
used for the fit.
"""
import scipy.spatial
try:
import tristars
from tristars.match import match_catalog_tri, match_diagnostic_plot
except:
print("""
Couldn't `import tristars`. Get it from https://github.com/gbrammer/tristars to enable improved blind astrometric matching with triangle asterisms.
""")
try:
# https://github.com/megalut/sewpy
import sewpy
except:
sewpy = None
use_sewpy = False
# Make FLT catalogs
cats = []
logstr = '### Tweak alignment (use_sewpy={0}) '.format(use_sewpy)
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
for i, file in enumerate(files):
root = file.split('.fits')[0]
im = pyfits.open(file)
try:
ok = im['DQ', 1].data == 0
except:
ok = np.isfinite(im['SCI', 1].data)
sci = im['SCI', 1].data*ok - np.median(im['SCI', 1].data[ok])
header = im['SCI', 1].header.copy()
for k in ['PHOTFNU', 'PHOTFLAM', 'PHOTPLAM', 'FILTER']:
if k in im[0].header:
header[k] = im[0].header[k]
hst_filter = utils.get_hst_filter(im[0].header)
header['FILTER'] = hst_filter
pyfits.writeto('{0}_xsci.fits'.format(root), data=sci,
header=header,
overwrite=True)
pyfits.writeto('{0}_xrms.fits'.format(root), data=im['ERR', 1].data,
header=im['ERR', 1].header, overwrite=True)
if use_sewpy:
params = ["X_IMAGE", "Y_IMAGE", "X_WORLD", "Y_WORLD",
"FLUX_RADIUS(3)", "FLAGS"]
sew = sewpy.SEW(params=params,
config={"DETECT_THRESH": threshold,
"DETECT_MINAREA": 8,
"PHOT_FLUXFRAC": "0.3, 0.5, 0.8",
"WEIGHT_TYPE": "MAP_RMS",
"WEIGHT_IMAGE": "{0}_xrms.fits".format(root)})
output = sew('{0}_xsci.fits'.format(root))
cat = output['table']
else:
# SEP
wht = 1/im['ERR', 1].data**2
wht[~(np.isfinite(wht))] = 0
pyfits.writeto('{0}_xwht.fits'.format(root), data=wht,
header=im['ERR', 1].header, overwrite=True)
pars = SEP_DETECT_PARAMS.copy()
pars['minarea'] = 8
cat = make_SEP_catalog(root=root,
sci='{0}_xsci.fits'.format(root),
wht='{0}_xwht.fits'.format(root),
threshold=threshold, detection_params=pars,
get_background=True, verbose=False)
######
if '_flc' in file:
wcs = pywcs.WCS(im['SCI', 1].header, fobj=im, relax=True)
else:
wcs = pywcs.WCS(im['SCI', 1].header, relax=True)
cats.append([cat, wcs])
for ext in ['_xsci', '_xrms', '_xwht', '_bkg', '_seg', '.cat']:
file = '{0}{1}.fits'.format(root, ext)
if os.path.exists(file):
os.remove(file)
c0 = cats[0][0]
not_CR = c0['FLUX_RADIUS'] > 1.5
c0 = c0[not_CR]
wcs_0 = cats[0][1]
xy_0 = np.array([c0['X_IMAGE'], c0['Y_IMAGE']]).T
tree = scipy.spatial.cKDTree(xy_0, 10)
try:
# Use Tristars for matching
# First 100
NMAX = 100
if len(xy_0) > NMAX:
so = np.argsort(c0['MAG_AUTO'])
xy_0 = xy_0[so[:NMAX], :]
shift_dict = OrderedDict()
for i in range(0, len(files)):
c_ii, wcs_i = cats[i]
not_CR = c_ii['FLUX_RADIUS'] > 1.5
c_i = c_ii[not_CR]
# SExtractor doesn't do SIP WCS?
rd = np.array(wcs_i.all_pix2world(c_i['X_IMAGE'], c_i['Y_IMAGE'], 1))
xy = np.array(wcs_0.all_world2pix(rd.T, 1))
if len(xy) > NMAX:
so = np.argsort(c_i['MAG_AUTO'])
xy = xy[so[:NMAX], :]
pair_ix = match_catalog_tri(xy, xy_0, **tristars_kwargs)
# if False:
# match_diagnostic_plot(xy, xy_0, pair_ix, tf=None,
# new_figure=False)
dr = xy[pair_ix[:, 0], :] - xy_0[pair_ix[:, 1], :]
ok = dr.max(axis=1) < 1000
dx = np.median(dr[ok, :], axis=0)
rms = np.std(dr[ok, :], axis=0)/np.sqrt(ok.sum())
shift_dict[files[i]] = [dx[0], dx[1], 0.0, 1.0, ok.sum(), rms]
lstr = "# tw {0} [{1:6.3f}, {2:6.3f}] [{3:6.3f}, {4:6.3f}] N={5}"
lstr = lstr.format(files[i], dx[0], dx[1], rms[0], rms[1],
ok.sum())
utils.log_comment(utils.LOGFILE, lstr, verbose=verbose)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! `tweak_flt` tristars failed")
shift_dict = OrderedDict()
for i in range(0, len(files)):
c_i, wcs_i = cats[i]
# SExtractor doesn't do SIP WCS?
rd = wcs_i.all_pix2world(c_i['X_IMAGE'], c_i['Y_IMAGE'], 1)
xy = np.array(wcs_0.all_world2pix(np.array(rd).T, 1))
N = xy.shape[0]
dist, ix = np.zeros(N), np.zeros(N, dtype=int)
for j in range(N):
dist[j], ix[j] = tree.query(xy[j, :], k=1,
distance_upper_bound=np.inf)
ok = dist < max_dist
if ok.sum() == 0:
                shift_dict[files[i]] = [0.0, 0.0, 0.0, 1.0, 0, np.zeros(2)]
                logstr = '# tw {0} ! no match'.format(files[i])
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
continue
dr = xy - xy_0[ix, :]
dx = np.median(dr[ok, :], axis=0)
rms = np.std(dr[ok, :], axis=0)/np.sqrt(ok.sum())
shift_dict[files[i]] = [dx[0], dx[1], 0.0, 1.0, ok.sum(), rms]
            lstr = '# tw {0} {1} {2} N={3}'
            lstr = lstr.format(files[i], dx, rms, ok.sum())
            utils.log_comment(utils.LOGFILE, lstr, verbose=verbose)
wcs_ref = cats[0][1]
return wcs_ref, shift_dict
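# Illustrative sketch (synthetic positions) of the shift measurement inside
# `tweak_flt`: given matched (x, y) positions of the same sources in an
# exposure and in the reference frame, the shift is the median offset and its
# uncertainty is the standard error of the offsets.
def _example_median_shift():
    import numpy as np
    rng = np.random.default_rng(1)
    xy_ref = rng.uniform(0, 1014, size=(50, 2))
    true_shift = np.array([0.34, -0.12])
    xy = xy_ref + true_shift + rng.normal(scale=0.05, size=(50, 2))
    dr = xy - xy_ref
    dx = np.median(dr, axis=0)
    rms = np.std(dr, axis=0)/np.sqrt(dr.shape[0])
    return dx, rms                       # ~true_shift, with per-axis uncertainty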
def apply_tweak_shifts(wcs_ref, shift_dict, grism_matches={}, verbose=True, log=True):
"""
Apply derived shifts to exposure WCS
Parameters
----------
wcs_ref : `~astropy.wcs.WCS`
Reference WCS against where shifts were computed
shift_dict : dict
Dictionary of shift information
>>> shift_dict[file] = [xshift, yshift, rot, scale]
grism_matches : dict
Dictionary defining associated grism / direct image exposures
verbose : bool
Print status information to the console
log : bool
Log arguments to `grizli.utils.LOGFILE`
Returns
-------
Nothing, FLT image WCS are modified in place
"""
from drizzlepac import updatehdr
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.apply_tweak_shifts')
hdu = wcs_ref.to_fits(relax=True)
file0 = list(shift_dict.keys())[0].split('.fits')[0]
tweak_file = '{0}_tweak_wcs.fits'.format(file0)
hdu.writeto(tweak_file, overwrite=True)
for file in shift_dict:
xyscale = shift_dict[file][:2]+[0., 1]
update_wcs_fits_log(file, wcs_ref, xyscale=xyscale, initialize=True,
replace=('.fits', '.wcslog.fits'),
wcsname='SHIFT')
updatehdr.updatewcs_with_shift(file, tweak_file,
xsh=shift_dict[file][0],
ysh=shift_dict[file][1],
rot=0., scale=1.,
wcsname='SHIFT', force=True,
reusename=True, verbose=verbose,
sciext='SCI')
# Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
# keywords
im = pyfits.open(file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
# Update paired grism exposures
if file in grism_matches:
for grism_file in grism_matches[file]:
xyscale = shift_dict[file][:2]+[0., 1]
update_wcs_fits_log(grism_file, wcs_ref, xyscale=xyscale,
initialize=True,
replace=('.fits', '.wcslog.fits'),
wcsname='SHIFT')
updatehdr.updatewcs_with_shift(grism_file, tweak_file,
xsh=shift_dict[file][0],
ysh=shift_dict[file][1],
rot=0., scale=1.,
wcsname='SHIFT', force=True,
reusename=True, verbose=verbose,
sciext='SCI')
# Bug in astrodrizzle?
im = pyfits.open(grism_file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
os.remove(tweak_file)
def find_direct_grism_pairs(direct={}, grism={}, check_pixel=[507, 507],
toler=0.1, key='A', same_visit=True, log=True):
"""Compute pairs of direct and grism exposures
For each grism exposure, check if there is a direct exposure
that matches the WCS to within `toler` pixels. If so, copy that WCS
directly.
Parameters
----------
direct : dict
Direct image visit dictionary (`product`, `files`)
grism : dict
Grism image visit dictionary (`product`, `files`)
check_pixel : (float, float)
Reference pixel to use for comparing WCS
toler : float
Tolerance in pixels for assigning matched exposure pairs
key : str
WCS key of the direct image WCS
same_visit : bool
Require that matches are from same program / visit as defined by the
first 6 characters in the image filenames
    log : bool
Write function call to `grizli.utils.LOGFILE`
Returns
-------
grism_matches : dict
Dictionary of the matched exposures, where the keys are filenames
of direct images and the values are lists of the computed associated
grism exposures
"""
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.find_direct_grism_pairs')
direct_wcs = {}
full_direct_wcs = {}
direct_rd = {}
grism_wcs = {}
grism_pix = {}
grism_matches = OrderedDict()
for file in direct['files']:
grism_matches[file] = []
im = pyfits.open(file)
#direct_wcs[file] = pywcs.WCS(im[1].header, relax=True, key=key)
#full_direct_wcs[file] = pywcs.WCS(im[1].header, relax=True)
if '_flc' in file:
direct_wcs[file] = pywcs.WCS(im[1].header, fobj=im, relax=True,
key=key)
full_direct_wcs[file] = pywcs.WCS(im[1].header, fobj=im,
relax=True)
else:
direct_wcs[file] = pywcs.WCS(im[1].header, relax=True, key=key)
full_direct_wcs[file] = pywcs.WCS(im[1].header, relax=True)
direct_rd[file] = direct_wcs[file].all_pix2world([check_pixel], 1)
if 'files' not in grism:
return grism_matches
for file in grism['files']:
im = pyfits.open(file)
if '_flc' in file:
grism_wcs[file] = pywcs.WCS(im[1].header, relax=True, key=key,
fobj=im)
else:
grism_wcs[file] = pywcs.WCS(im[1].header, relax=True, key=key)
# print file
delta_min = 10
for d in direct['files']:
if (os.path.basename(d)[:6] != os.path.basename(file)[:6]) & same_visit:
continue
pix = grism_wcs[file].all_world2pix(direct_rd[d], 1)
dx = pix-np.array(check_pixel)
delta = np.sqrt(np.sum(dx**2))
# print ' %s %s, %.3f' %(d, dx, delta)
if delta < delta_min:
delta_min = delta
delta_min_file = d
if delta_min < toler:
grism_matches[delta_min_file].append(file)
return grism_matches
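# Tiny sketch (made-up pixel values) of the matching criterion used by
# `find_direct_grism_pairs`: the check pixel is pushed through the direct WCS
# to the sky and back through the grism WCS; exposures pair up when it lands
# within `toler` pixels of the original position.
def _example_wcs_pair_distance(toler=0.1):
    import numpy as np
    check_pixel = np.array([507., 507.])
    pix_in_grism = np.array([507.03, 506.98])   # hypothetical round-trip pixel
    delta = np.sqrt(np.sum((pix_in_grism - check_pixel)**2))
    return delta < toler                         # True -> matched pair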
def match_direct_grism_wcs(direct={}, grism={}, get_fresh_flt=True,
run_drizzle=True, xyscale=None):
"""Match WCS of grism exposures to corresponding direct images
Parameters
----------
direct : dict
Direct image visit dictionary (`product`, `files`)
grism : dict
Grism image visit dictionary (`product`, `files`)
get_fresh_flt : bool
Get fresh versions of the grism exposures without any subsequent
modifications
run_drizzle : bool
Not used
xyscale : None, list
Transformation parameters ``[xshift, yshift, rot, scale]``. If not
specified, then get from the `wcs.log` file associated with the
direct images
Returns
-------
Nothing, WCS headers updated in the grism FLT files
"""
from drizzlepac import updatehdr
from stwcs import updatewcs
from drizzlepac.astrodrizzle import AstroDrizzle
if get_fresh_flt:
for file in grism['files']:
fresh_flt_file(file)
try:
updatewcs.updatewcs(file, verbose=False, use_db=False)
except:
updatewcs.updatewcs(file, verbose=False)
direct_flt = pyfits.open(direct['files'][0])
ref_catalog = direct_flt['SCI', 1].header['WCSNAME']
# User-defined shifts
if xyscale is not None:
# Use user-defined shifts
xsh, ysh, rot, scale = xyscale
tmp_wcs_file = '/tmp/{0}_tmpwcs.fits'.format(str(direct['product']))
try:
# Use WCS in catalog file
wcs_hdu = pyfits.open('{0}.cat.fits'.format(direct['product']))
ext = 'WCS'
except:
wcs_hdu = pyfits.open('{0}_wcs.fits'.format(direct['product']))
ext = len(wcs_hdu)-1
wcs_hdu[ext].writeto(tmp_wcs_file, overwrite=True)
tmp_wcs = pywcs.WCS(wcs_hdu[ext].header, relax=True)
for file in grism['files']:
xyscale = [xsh, ysh, rot, scale]
update_wcs_fits_log(file, tmp_wcs,
xyscale=xyscale,
initialize=False,
replace=('.fits', '.wcslog.fits'),
wcsname=ref_catalog)
updatehdr.updatewcs_with_shift(file, tmp_wcs_file,
xsh=xsh,
ysh=ysh,
rot=rot, scale=scale,
wcsname=ref_catalog, force=True,
reusename=True, verbose=True,
sciext='SCI')
# Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
# keywords
im = pyfits.open(file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
return True
# Get from WCS log file
wcs_log = Table.read('{0}_wcs.log'.format(direct['product']),
format='ascii.commented_header')
wcs_hdu = pyfits.open('{0}_wcs.fits'.format(direct['product']))
for ext in wcs_log['ext']:
tmp_wcs_file = '/tmp/{0}_tmpwcs.fits'.format(str(direct['product']))
wcs_hdu[ext].writeto(tmp_wcs_file, overwrite=True)
tmp_wcs = pywcs.WCS(wcs_hdu[ext].header, relax=True)
if 'scale' in wcs_log.colnames:
scale = wcs_log['scale'][ext]
else:
scale = 1.
for file in grism['files']:
xyscale = [wcs_log['xshift'][ext], wcs_log['yshift'][ext],
wcs_log['rot'][ext], scale]
update_wcs_fits_log(file, tmp_wcs,
xyscale=xyscale,
initialize=False,
replace=('.fits', '.wcslog.fits'),
wcsname=ref_catalog)
updatehdr.updatewcs_with_shift(file, tmp_wcs_file,
xsh=wcs_log['xshift'][ext],
ysh=wcs_log['yshift'][ext],
rot=wcs_log['rot'][ext], scale=scale,
wcsname=ref_catalog, force=True,
reusename=True, verbose=True,
sciext='SCI')
# Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
# keywords
im = pyfits.open(file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
# Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
# keywords
for file in grism['files']:
im = pyfits.open(file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
def visit_grism_sky(grism={}, apply=True, column_average=True, verbose=True, ext=1, sky_iter=10, iter_atol=1.e-4, use_spline=True, NXSPL=50):
"""Subtract sky background from grism exposures
Implementation of the multi-component grism sky subtraction from
`WFC3/ISR 2015-17 <https://ui.adsabs.harvard.edu/abs/2015wfc..rept...17B>`_
"""
import numpy.ma
import scipy.ndimage as nd
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.visit_grism_sky')
#from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
# Figure out which grism
im = pyfits.open(grism['files'][0])
grism_element = utils.get_hst_filter(im[0].header)
flat = 1.
if grism_element == 'G141':
bg_fixed = ['zodi_G141_clean.fits']
bg_vary = ['zodi_G141_clean.fits', 'excess_lo_G141_clean.fits',
'G141_scattered_light.fits'][1:]
isACS = False
elif grism_element == 'G102':
bg_fixed = ['zodi_G102_clean.fits']
bg_vary = ['excess_G102_clean.fits']
isACS = False
elif grism_element == 'G280':
bg_fixed = ['UVIS.G280.flat.fits']
bg_vary = ['UVIS.G280.ext{0:d}.sky.fits'.format(ext)]
isACS = True
flat = 1.
elif grism_element == 'G800L':
bg_fixed = ['ACS.WFC.CHIP{0:d}.msky.1.smooth.fits'.format({1: 2, 2: 1}[ext])]
bg_vary = ['ACS.WFC.flat.fits']
#bg_fixed = ['ACS.WFC.CHIP%d.msky.1.fits' %({1:2,2:1}[ext])]
#bg_fixed = []
isACS = True
flat_files = {'G800L': 'n6u12592j_pfl.fits'} # F814W
flat_file = flat_files[grism_element]
flat_im = pyfits.open(os.path.join(os.getenv('jref'), flat_file))
flat = flat_im['SCI', ext].data.flatten()
logstr = '# visit_grism_sky / {0}: EXTVER={1:d} / {2} / {3}'
logstr = logstr.format(grism['product'], ext, bg_fixed, bg_vary)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
if not isACS:
ext = 1
# Read sky files
data_fixed = []
for file in bg_fixed:
im = pyfits.open('{0}/CONF/{1}'.format(GRIZLI_PATH, file))
sh = im[0].data.shape
data = im[0].data.flatten()/flat
data_fixed.append(data)
data_vary = []
for file in bg_vary:
im = pyfits.open('{0}/CONF/{1}'.format(GRIZLI_PATH, file))
data_vary.append(im[0].data.flatten()*1)
sh = im[0].data.shape
yp, xp = np.indices(sh)
# Hard-coded (1014,1014) WFC3/IR images
Npix = sh[0]*sh[1]
Nexp = len(grism['files'])
Nfix = len(data_fixed)
Nvary = len(data_vary)
Nimg = Nexp*Nvary + Nfix
A = np.zeros((Npix*Nexp, Nimg), dtype=np.float32)
data = np.zeros(Npix*Nexp, dtype=np.float32)
wht = data*0.
mask = data > -1
medians = np.zeros(Nexp)
exptime = np.ones(Nexp)
# Build combined arrays
if isACS:
bits = 64+32
else:
bits = 576
for i in range(Nexp):
flt = pyfits.open(grism['files'][i])
dq = utils.unset_dq_bits(flt['DQ', ext].data, okbits=bits)
dq_mask = dq == 0
# Data
data[i*Npix:(i+1)*Npix] = (flt['SCI', ext].data*dq_mask).flatten()
mask[i*Npix:(i+1)*Npix] &= dq_mask.flatten() # == 0
wht[i*Npix:(i+1)*Npix] = 1./(flt['ERR', ext].data**2*dq_mask).flatten()
wht[~np.isfinite(wht)] = 0.
if isACS:
exptime[i] = flt[0].header['EXPTIME']
data[i*Npix:(i+1)*Npix] /= exptime[i]
wht[i*Npix:(i+1)*Npix] *= exptime[i]**2
medians[i] = np.median(flt['SCI', ext].data[dq_mask]/exptime[i])
else:
medians[i] = np.median(flt['SCI', ext].data[dq_mask])
# Fixed arrays
for j in range(Nfix):
for k in range(Nexp):
A[k*Npix:(k+1)*Npix, j] = data_fixed[j]
mask_j = (data_fixed[j] > 0) & np.isfinite(data_fixed[j])
mask[i*Npix:(i+1)*Npix] &= mask_j
# Variable arrays
for j in range(Nvary):
k = Nfix+j+Nvary*i
A[i*Npix:(i+1)*Npix, k] = data_vary[j]
mask[i*Npix:(i+1)*Npix] &= np.isfinite(data_vary[j])
# Initial coeffs based on image medians
coeffs = np.array([np.min(medians)])
if Nvary > 0:
coeffs = np.hstack((coeffs, np.zeros(Nexp*Nvary)))
coeffs[1::Nvary] = medians-medians.min()
model = np.dot(A, coeffs)
coeffs_0 = coeffs
for iter in range(sky_iter):
model = np.dot(A, coeffs)
resid = (data-model)*np.sqrt(wht)
obj_mask = (resid < 2.5) & (resid > -3)
for j in range(Nexp):
obj_j = nd.minimum_filter(obj_mask[j*Npix:(j+1)*Npix], size=30)
obj_mask[j*Npix:(j+1)*Npix] = (obj_j > 0).flatten()
logstr = '# visit_grism_sky {0} > Iter: {1:d}, masked: {2:2.0f}%, {3}'
logstr = logstr.format(grism['product'], iter+1, obj_mask.sum()/Npix/Nimg*100, coeffs)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
out = np.linalg.lstsq(A[mask & obj_mask, :], data[mask & obj_mask],
rcond=utils.LSTSQ_RCOND)
coeffs = out[0]
# Test for convergence
if np.allclose(coeffs, coeffs_0, rtol=1.e-5, atol=iter_atol):
break
else:
coeffs_0 = coeffs
# Best-fit sky
sky = np.dot(A, coeffs).reshape(Nexp, Npix)
# log file
fp = open('{0}_{1}_sky_background.info'.format(grism['product'], ext), 'w')
fp.write('# file c1 {0}\n'.format(' '.join(['c{0:d}'.format(v+2)
for v in range(Nvary)])))
fp.write('# {0}\n'.format(grism['product']))
fp.write('# bg1: {0}\n'.format(bg_fixed[0]))
for v in range(Nvary):
fp.write('# bg{0:d}: {1}\n'.format(v+2, bg_vary[v]))
for j in range(Nexp):
file = grism['files'][j]
line = '{0} {1:9.4f}'.format(file, coeffs[0])
for v in range(Nvary):
k = Nfix + j*Nvary + v
line = '{0} {1:9.4f}'.format(line, coeffs[k])
fp.write(line+'\n')
fp.close()
if apply:
for j in range(Nexp):
file = grism['files'][j]
flt = pyfits.open(file, mode='update')
flt['SCI', ext].data -= sky[j, :].reshape(sh)*exptime[j]
header = flt[0].header
header['GSKYCOL{0:d}'.format(ext)] = (False, 'Subtract column average')
header['GSKYN{0:d}'.format(ext)] = (Nfix+Nvary, 'Number of sky images')
header['GSKY{0:d}01'.format(ext)] = (coeffs[0],
'Sky image {0} (fixed)'.format(bg_fixed[0]))
header['GSKY{0:d}01F'.format(ext)] = (bg_fixed[0], 'Sky image (fixed)')
for v in range(Nvary):
k = Nfix + j*Nvary + v
# print coeffs[k]
header['GSKY{0}{1:02d}'.format(ext, v+Nfix+1)] = (coeffs[k],
'Sky image {0} (variable)'.format(bg_vary[v]))
header['GSKY{0}{1:02d}F'.format(ext, v+Nfix+1)] = (bg_vary[v],
'Sky image (variable)')
flt.flush()
# Don't do `column_average` for ACS
if (not column_average) | isACS:
return isACS
######
# Now fit residual column average & make diagnostic plot
interactive_status = plt.rcParams['interactive']
plt.ioff()
fig = plt.figure(figsize=[6., 6.])
ax = fig.add_subplot(111)
im_shape = (1014, 1014)
for j in range(Nexp):
file = grism['files'][j]
resid = (data[j*Npix:(j+1)*Npix] - sky[j, :]).reshape(im_shape)
m = (mask & obj_mask)[j*Npix:(j+1)*Npix].reshape(im_shape)
# Statistics of masked arrays
ma = np.ma.masked_array(resid, mask=(~m))
med = np.ma.median(ma, axis=0)
bg_sky = 0
yrms = np.ma.std(ma, axis=0)/np.sqrt(np.sum(m, axis=0))
xmsk = np.arange(im_shape[0])
yres = med
yok = (~yrms.mask) & np.isfinite(yrms) & np.isfinite(xmsk) & np.isfinite(yres)
if yok.sum() == 0:
print('ERROR: No valid pixels found!')
continue
# Fit column average with smoothed Gaussian Process model
# if False:
# #### xxx old GaussianProcess implementation
# gp = GaussianProcess(regr='constant', corr='squared_exponential',
# theta0=8, thetaL=5, thetaU=12,
# nugget=(yrms/bg_sky)[yok][::1]**2,
# random_start=10, verbose=True, normalize=True)
#
# try:
# gp.fit(np.atleast_2d(xmsk[yok][::1]).T, yres[yok][::1]+bg_sky)
# except:
# warn = '# visit_grism_sky / GaussianProces failed!\n# visit_grism_sky / Check that this exposure wasn\'t fried by variable backgrounds.'
# print(warn)
# utils.log_exception(utils.LOGFILE, traceback)
# utils.log_comment(utils.LOGFILE, warn)
#
# continue
#
# y_pred, MSE = gp.predict(np.atleast_2d(xmsk).T, eval_MSE=True)
# gp_sigma = np.sqrt(MSE)
if use_spline:
# Fit with Spline basis functions
#NXSPL = 50
xpad = np.arange(-1*NXSPL, im_shape[0]+1*NXSPL)
Aspl = utils.bspline_templates(xpad, degree=3,
df=4+im_shape[0]//NXSPL,
get_matrix=True, log=False,
clip=0.0001)[1*NXSPL:-1*NXSPL, :]
Ax = (Aspl.T/yrms).T
cspl, _, _, _ = np.linalg.lstsq(Ax, (yres+bg_sky)/yrms,
rcond=utils.LSTSQ_RCOND)
y_pred = Aspl.dot(cspl)
try:
ND = 100
#covar = np.matrix(np.dot(Ax.T, Ax)).I.A
covar = utils.safe_invert(np.dot(Ax.T, Ax))
draws = np.random.multivariate_normal(cspl, covar, ND)
gp_sigma = np.std(Aspl.dot(draws.T), axis=1)
except:
gp_sigma = y_pred*0.
else:
# Updated sklearn GaussianProcessRegressor
nmad_y = utils.nmad(yres)
gpscl = 100 # rough normalization
k1 = 0.3**2 * RBF(length_scale=80) # Background variations
k2 = 1**2 * WhiteKernel(noise_level=(nmad_y*gpscl)**2) # noise
gp_kernel = k1+k2 # +outliers
yok &= np.abs(yres-np.median(yres)) < 50*nmad_y
gp = GaussianProcessRegressor(kernel=gp_kernel,
alpha=nmad_y*gpscl/5,
optimizer='fmin_l_bfgs_b',
n_restarts_optimizer=0,
normalize_y=False,
copy_X_train=True,
random_state=None)
gp.fit(np.atleast_2d(xmsk[yok][::1]).T,
(yres[yok][::1]+bg_sky)*gpscl)
y_pred, gp_sigma = gp.predict(np.atleast_2d(xmsk).T,
return_std=True)
gp_sigma /= gpscl
y_pred /= gpscl
# Plot Results
pi = ax.plot(med, alpha=0.1, zorder=-100)
ax.plot(y_pred-bg_sky, color=pi[0].get_color())
ax.fill_between(xmsk, y_pred-bg_sky-gp_sigma, y_pred-bg_sky+gp_sigma,
color=pi[0].get_color(), alpha=0.3,
label=grism['files'][j].split('_fl')[0])
# result
fp = open(file.replace('_flt.fits', '_column.dat'), 'wb')
fp.write(b'# column obs_resid ok resid uncertainty\n')
np.savetxt(fp, np.array([xmsk, yres, yok*1, y_pred-bg_sky, gp_sigma]).T, fmt='%.5f')
fp.close()
if apply:
# Subtract the column average in 2D & log header keywords
gp_res = np.dot(y_pred[:, None]-bg_sky, np.ones((1014, 1)).T).T
flt = pyfits.open(file, mode='update')
flt['SCI', 1].data -= gp_res
flt[0].header['GSKYCOL'] = (True, 'Subtract column average')
flt.flush()
# Finish plot
ax.legend(loc='lower left', fontsize=10)
ax.plot([-10, 1024], [0, 0], color='k')
ax.set_xlim(-10, 1024)
ax.set_xlabel(r'pixel column ($x$)')
ax.set_ylabel(r'column average (e-/s)')
ax.set_title(grism['product'])
ax.grid()
fig.tight_layout(pad=0.1)
fig.savefig('{0}_column.png'.format(grism['product']))
#fig.savefig('%s_column.pdf' %(grism['product']))
plt.close()
# Clean up large arrays
del(data)
del(A)
del(wht)
del(mask)
del(model)
if interactive_status:
plt.ion()
return False
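# Toy, self-contained sketch (synthetic templates and data) of the linear sky
# model solved in `visit_grism_sky`: the pixels are modeled as a fixed sky
# template plus a per-exposure scaling of a variable template, and the
# coefficients come from a least-squares solve.
def _example_multicomponent_sky_fit():
    import numpy as np
    rng = np.random.default_rng(2)
    npix = 500
    fixed = np.ones(npix)                        # e.g., zodiacal template
    vary = np.linspace(0, 1, npix)               # e.g., scattered-light template
    truth = [0.8, 0.3]                           # [fixed coeff, variable coeff]
    data = truth[0]*fixed + truth[1]*vary + rng.normal(scale=0.01, size=npix)
    A = np.vstack([fixed, vary]).T
    coeffs, *_ = np.linalg.lstsq(A, data, rcond=None)
    return coeffs, data - A.dot(coeffs)          # ~truth, residuals ~noise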
def fix_star_centers(root='macs1149.6+2223-rot-ca5-22-032.0-f105w',
mag_lim=22, verbose=True, drizzle=False,
cutout_size=16):
"""Unset the CR bit (4096) in the centers of bright objects
Parameters
----------
root : str
Root name of drizzle product (direct imaging).
mag_lim : float
Magnitude limit of objects to consider
verbose : bool
Print messages to the terminal
drizzle : bool
Redrizzle the output image
cutout_size : int
Size of the cutout to extract around the bright stars
Returns
-------
Nothing, updates FLT files in place.
"""
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.fix_star_centers')
from drizzlepac.astrodrizzle import AstroDrizzle
EPSF = utils.EffectivePSF()
sci = pyfits.open('{0}_drz_sci.fits'.format(root))
#cat = Table.read('{0}.cat'.format(root), format='ascii.commented_header')
cat = utils.GTable.gread('{0}.cat.fits'.format(root))
# Load FITS files
N = sci[0].header['NDRIZIM']
images = []
wcs = []
for i in range(N):
flt = pyfits.open(sci[0].header['D{0:03d}DATA'.format(i+1)].split('[')[0], mode='update')
# if True:
# flt = pyfits.open('../RAW/'+sci[0].header['D{0:03d}DATA'.format(i+1)].split('[')[0], mode='update')
wcs.append(pywcs.WCS(flt[1], relax=True))
images.append(flt)
yp, xp = np.indices((1014, 1014))
use = cat['MAG_AUTO'] < mag_lim
so = np.argsort(cat['MAG_AUTO'][use])
if verbose:
print('# {0:6s} {1:12s} {2:12s} {3:7s} {4} {5}'.format('id', 'ra',
'dec', 'mag',
'nDQ', 'nSat'))
for line in cat[use][so]:
rd = line['X_WORLD'], line['Y_WORLD']
nset = []
nsat = []
for i in range(N):
xi, yi = wcs[i].all_world2pix([rd[0], ], [rd[1], ], 0)
r = np.sqrt((xp-xi[0])**2 + (yp-yi[0])**2)
unset = (r <= 3) & ((images[i]['DQ'].data & 4096) > 0)
nset.append(unset.sum())
if nset[i] > 0:
images[i]['DQ'].data[unset] -= 4096
# Fill saturated with EPSF fit
satpix = (r <= 10) & (((images[i]['DQ'].data & 256) > 0) | ((images[i]['DQ'].data & 2048) > 0))
nsat.append(satpix.sum())
if nsat[i] > 0:
xpi = int(np.round(xi[0]))
ypi = int(np.round(yi[0]))
slx = slice(xpi-cutout_size, xpi+cutout_size)
sly = slice(ypi-cutout_size, ypi+cutout_size)
sci = images[i]['SCI'].data[sly, slx]
dq = images[i]['DQ'].data[sly, slx]
dqm = dq - (dq & 2048)
err = images[i]['ERR'].data[sly, slx]
mask = satpix[sly, slx]
ivar = 1/err**2
ivar[(~np.isfinite(ivar)) | (dqm > 0)] = 0
# Fit the EPSF model
try:
psf_filter = images[0][0].header['FILTER']
Np = 15
guess = [cutout_size-1, cutout_size-1]
#guess = None
tol = 1.e-3
psf_params = EPSF.fit_ePSF(sci, ivar=ivar, center=None,
tol=tol, N=Np,
origin=(ypi-cutout_size, xpi-cutout_size),
filter=psf_filter, get_extended=True,
method='Powell', only_centering=True,
guess=guess, psf_params=None)
result = EPSF.fit_ePSF(sci, ivar=ivar, center=None,
tol=tol, N=Np,
origin=(ypi-cutout_size, xpi-cutout_size),
filter=psf_filter, get_extended=True,
method='Powell', only_centering=True,
guess=guess, psf_params=psf_params)
psf, psf_bkg, psfA, psf_coeffs = result
# psf = EPSF.get_ePSF(psf_params,
# origin=(ypi-cutout_size, xpi-cutout_size),
# shape=sci.shape, filter=psf_filter,
# get_extended=True)
# if i == 0:
# break
except:
continue
sci[mask] = psf[mask]
dq[mask] -= (dq[mask] & 2048)
#dq[mask] -= (dq[mask] & 256)
#dq[mask] |= 512
if verbose:
print('{0:6d} {1:12.6f} {2:12.6f} {3:7.2f} {4} {5}'.format(
line['NUMBER'], rd[0], rd[1], line['MAG_AUTO'], nset, nsat))
# Overwrite image
for i in range(N):
images[i].flush()
if drizzle:
files = [flt.filename() for flt in images]
bits = 576
if root.startswith('par'):
pixfrac = 1.0
else:
pixfrac = 0.8
# Fix Nans:
for flt_file in files:
utils.fix_flt_nan(flt_file, bad_bit=4096, verbose=True)
AstroDrizzle(files, output=root,
clean=True, final_pixfrac=pixfrac, context=False,
resetbits=0, final_bits=bits, driz_sep_bits=bits,
preserve=False, driz_separate=False,
driz_sep_wcs=False, median=False, blot=False,
driz_cr=False, driz_cr_corr=False, build=False,
final_wht_type='IVM')
clean_drizzle(root)
#cat = make_drz_catalog(root=root)
cat = make_SEP_catalog(root=root)
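# Stand-alone sketch (synthetic DQ array) of the core step in
# `fix_star_centers`: the 4096 cosmic-ray bit is unset within a small radius
# of a bright-star position so the star center isn't rejected as a CR.
def _example_unset_cr_bit():
    import numpy as np
    dq = np.zeros((64, 64), dtype=np.int32)
    dq[30, 31] |= 4096                           # a flagged pixel near the star
    yp, xp = np.indices(dq.shape)
    r = np.sqrt((xp - 31.)**2 + (yp - 30.)**2)
    unset = (r <= 3) & ((dq & 4096) > 0)
    dq[unset] -= 4096
    return int((dq & 4096).sum())                # 0 -> bit cleared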
def find_single_image_CRs(visit, simple_mask=False, with_ctx_mask=True,
run_lacosmic=True):
"""Use LACosmic to find CRs in parts of an ACS mosaic where only one
exposure was available
Parameters
----------
visit : dict
List of visit information from `~grizli.utils.parse_flt_files`.
simple_mask : bool
If true, set 1024 CR bit for all parts of a given FLT where it does
not overlap with any others in the visit. If False, then run
LACosmic to flag CRs in this area but keep the pixels.
run_lacosmic : bool
Run LA Cosmic.
Requires context (CTX) image `visit['product']+'_drc_ctx.fits`.
"""
from drizzlepac import astrodrizzle
try:
import lacosmicx
has_lacosmicx = True
except:
if run_lacosmic:
print('Warning (find_single_image_CRs): couldn\'t import lacosmicx')
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# ! LACosmicx requested but not found")
has_lacosmicx = False
# try:
# import reproject
# HAS_REPROJECT = True
# except:
# HAS_REPROJECT = False
HAS_REPROJECT = False
ctx_files = glob.glob(visit['product']+'_dr?_ctx.fits')
has_ctx = len(ctx_files) > 0
if has_ctx:
ctx = pyfits.open(ctx_files[0])
bits = np.log2(ctx[0].data)
mask = ctx[0].data == 0
        single_image = np.cast[np.float32]((np.cast[int](bits) == bits) & (~mask))
ctx_wcs = pywcs.WCS(ctx[0].header)
ctx_wcs.pscale = utils.get_wcs_pscale(ctx_wcs)
else:
simple_mask = False
with_ctx_mask = False
for file in visit['files']:
flt = pyfits.open(file, mode='update')
# WFPC2
if '_c0' in file:
dq_hdu = pyfits.open(file.replace('_c0', '_c1'), mode='update')
dq_extname = 'SCI'
else:
dq_hdu = flt
dq_extname = 'DQ'
for ext in [1, 2, 3, 4]:
if ('SCI', ext) not in flt:
continue
flt_wcs = pywcs.WCS(flt['SCI', ext].header, fobj=flt, relax=True)
flt_wcs.pscale = utils.get_wcs_pscale(flt_wcs)
if has_ctx:
blotted = utils.blot_nearest_exact(single_image, ctx_wcs,
flt_wcs)
ctx_mask = blotted > 0
else:
ctx_mask = np.zeros(flt['SCI', ext].data.shape, dtype=bool)
sci = flt['SCI', ext].data
dq = dq_hdu[dq_extname, ext].data
if simple_mask:
print('{0}: Mask image without overlaps, extension {1:d}'.format(file, ext))
dq[ctx_mask] |= 1024
else:
print('{0}: Clean CRs with LACosmic, extension {1:d}'.format(file, ext))
if with_ctx_mask:
inmask = blotted == 0
else:
inmask = dq > 0
if run_lacosmic & has_lacosmicx:
crmask, clean = lacosmicx.lacosmicx(sci, inmask=inmask,
sigclip=4.5, sigfrac=0.3, objlim=5.0, gain=1.0,
readnoise=6.5, satlevel=65536.0, pssl=0.0,
niter=4, sepmed=True, cleantype='meanmask',
fsmode='median', psfmodel='gauss',
psffwhm=2.5, psfsize=7, psfk=None, psfbeta=4.765,
verbose=False)
else:
crmask = ctx_mask
if with_ctx_mask:
dq[crmask & ctx_mask] |= 1024
else:
dq[crmask] |= 1024
#sci[crmask & ctx_mask] = 0
flt.flush()
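# Synthetic sketch of the context-image trick used in `find_single_image_CRs`:
# a drizzle CTX value is a bit mask of contributing exposures, so pixels whose
# value is an exact power of two were covered by a single input image.
def _example_single_coverage_from_ctx():
    import numpy as np
    ctx = np.array([[0, 1, 2, 3],
                    [4, 5, 6, 8]])               # made-up context values
    bits = np.zeros(ctx.shape, dtype=float)
    nonzero = ctx > 0
    bits[nonzero] = np.log2(ctx[nonzero])
    single = nonzero & (bits == np.round(bits))
    return single                                # True for 1, 2, 4, 8 only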
def clean_amplifier_residuals(files, extensions=[1,2], minpix=5e5, max_percentile=99, seg_hdu=None, skip=10, polynomial_degree=3, verbose=True, imsh_kwargs={'vmin':-1.e-3, 'vmax':1.e-3, 'cmap':'magma'}):
"""
Fit and remove a 2D polynomial fit to the detector-frame UVIS/WFC images
Parameters
----------
files : list
List of FLC files
extensions : list
List of extensions to consider (ACS and UVIS have two). Extensions
will be addressed as ``im['SCI',ext]``.
minpix : int
Minimum number of unmasked pixels required to perform the fit
    seg_hdu : `~astropy.io.fits.ImageHDU`
Optional HDU defining a mask for the individual exposures. Will
be blotted to the FLC frame and valid pixels are taken to be where
seg_hdu.data == 0 (e.g., a segmentation image).
skip : int
Pass every ``skip`` (unmasked) pixel to the polynomial fit.
polynomial_degree : int
        Degree of the `~astropy.modeling.models.Polynomial2D` model to use.
verbose : bool
Print status information
imsh_kwargs : dict
Args to pass to `~matplotlib.pyplot.imshow` for the figure
"""
from astropy.modeling.fitting import LinearLSQFitter
from astropy.modeling.models import Polynomial2D
from matplotlib.ticker import MultipleLocator
if len(files) == 0:
print('No files specified')
return False
if files[0].startswith('j'):
# ACS WFC
sh = (2048, 4096)
else:
sh = (2051, 4096)
yp, xp = np.indices(sh, dtype=np.float32)
yp /= sh[0]+1
xp /= sh[1]+1
num = np.zeros(sh, dtype=np.float32)
den = np.zeros(sh, dtype=np.float32)
if seg_hdu is not None:
seg_wcs = pywcs.WCS(seg_hdu.header)
ims = [pyfits.open(file, mode='update') for file in files]
n_ext = len(extensions)
    fig, axes = plt.subplots(nrows=n_ext, ncols=1, figsize=(4, 2*n_ext),
                             sharex=True)
    axes = np.atleast_1d(axes)  # keep an array even for a single extension
for ext in extensions:
for im in ims:
wht = 1/im['ERR',ext].data**2*(im['DQ',ext].data == 0)
valid = np.isfinite(wht) & np.isfinite(im['SCI',ext].data)
if verbose:
print(f' Process {im.filename()}[{ext}]')
if seg_hdu is not None:
flc_wcs = pywcs.WCS(im['SCI',ext].header, fobj=im)
_blt = utils.blot_nearest_exact(seg_hdu.data, seg_wcs,
flc_wcs, verbose=False,
stepsize=-1,
scale_by_pixel_area=False,
wcs_mask=True, fill_value=0)
valid &= (_blt == 0)
wht[~valid] = 0
if 'MDRIZSKY' in im['SCI',ext].header:
bkg = im['SCI',ext].header['MDRIZSKY']
else:
bkg = np.median(im['SCI', ext].data[valid])
_sci = (im['SCI',ext].data - bkg)/im[0].header['EXPTIME']
num += _sci*wht
den += wht
avg = num/den
avg[den == 0] = 0
# Amps in detector middle
poly = Polynomial2D(polynomial_degree)
fitter = LinearLSQFitter()
quad_model = np.zeros(sh, dtype=np.float32)
_h = pyfits.Header()
_h['QORDER'] = polynomial_degree, 'Quad polynomial degree'
for q in [1,2]:
quad = (xp >= (q-1)/2.) & (xp < q/2.)
clip = quad & (den > 0)
limit = np.percentile(avg[clip], max_percentile)
clip &= avg < limit
if clip.sum() < minpix:
                print(f'Warning: not enough pixels found for ext:{ext} q:{q}')
continue
_fit = fitter(poly, xp[clip][::skip], yp[clip][::skip],
avg[clip][::skip])
quad_model[quad] = _fit(xp[quad], yp[quad])
for name, val in zip(_fit.param_names, _fit.parameters):
_h[f'Q{q}_{name}'] = (val, 'Quad polynomial component')
axes[::-1][ext-1].imshow(quad_model, **imsh_kwargs)
axes[::-1][ext-1].text(0.05, 0.95, f'Ext {ext}', ha='left', va='top',
color='w', fontsize=10,
transform=axes[::-1][ext-1].transAxes)
axes[::-1][ext-1].text(0.05, 0.05, f'Q1', ha='left', va='bottom',
color='w', fontsize=8,
transform=axes[::-1][ext-1].transAxes)
axes[::-1][ext-1].text(0.55, 0.05, f'Q2', ha='left', va='bottom',
color='w', fontsize=8,
transform=axes[::-1][ext-1].transAxes)
for im in ims:
expt = im[0].header['EXPTIME']
im['SCI',ext].data -= quad_model*expt
for key, comment in zip(_h.keys(), _h.comments):
if key in im['SCI', ext].header:
# Already exists, add polynomial component
im['SCI',ext].header[key] += _h[key]
else:
im['SCI',ext].header[key] = (_h[key], comment)
for ax in axes:
ax.xaxis.set_major_locator(MultipleLocator(1024))
ax.xaxis.set_minor_locator(MultipleLocator(256))
ax.yaxis.set_major_locator(MultipleLocator(1024))
ax.yaxis.set_minor_locator(MultipleLocator(256))
fig.tight_layout(pad=0.5)
for im in ims:
im.flush()
return fig
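# Small synthetic sketch of the per-amplifier surface fit in
# `clean_amplifier_residuals`: a low-order 2D polynomial is fit to a
# subsampled image with `astropy.modeling` and evaluated on the full grid.
def _example_polynomial2d_fit():
    import numpy as np
    from astropy.modeling.fitting import LinearLSQFitter
    from astropy.modeling.models import Polynomial2D
    yp, xp = np.indices((128, 256), dtype=np.float32)
    xp /= xp.max()
    yp /= yp.max()
    surface = 1.e-3*(0.2 + 0.5*xp - 0.3*yp + 0.4*xp*yp)   # synthetic residual
    fitter = LinearLSQFitter()
    fit = fitter(Polynomial2D(3), xp[::4, ::4], yp[::4, ::4], surface[::4, ::4])
    model = fit(xp, yp)
    return np.abs(model - surface).max()                  # ~0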
def drizzle_overlaps(exposure_groups, parse_visits=False, check_overlaps=True, max_files=999, pixfrac=0.8, scale=0.06, skysub=True, skymethod='localmin', skyuser='MDRIZSKY', bits=None, build=False, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='EXP', final_wt_scl='exptime', final_kernel='square', context=False, static=True, use_group_footprint=False, fetch_flats=True, fix_wcs_system=False, include_saturated=False, run_driz_cr=False, driz_cr_snr=None, driz_cr_scale=None, resetbits=0, driz_cr_snr_grow=1, driz_cr_scale_grow=1, log=False, **kwargs):
"""Combine overlapping visits into single output mosaics
Parameters
----------
exposure_groups : list
Output list of visit information from `~grizli.utils.parse_flt_files`.
parse_visits : bool
If set, parse the `exposure_groups` list for overlaps with
`~grizli.utils.parse_visit_overlaps`, otherwise assume that it has
already been parsed.
check_overlaps: bool
Only pass exposures that overlap with the desired output mosaic to
AstroDrizzle.
    max_files : int
Split output products if the number of exposures in a group is greater
        than `max_files`. The default value of 999 is appropriate for
        AstroDrizzle, which crashes with more inputs because its numbered
        header keywords allow only three digits (i.e., 0-999).
pixfrac : float
`~drizzlepac.astrodrizzle.AstroDrizzle` "pixfrac" value.
    scale : float
`~drizzlepac.astrodrizzle.AstroDrizzle` "scale" value, output pixel
scale in `~astropy.units.arcsec`.
skysub : bool
Run `~drizzlepac.astrodrizzle.AstroDrizzle` sky subtraction.
bits : None or int
Data quality bits to treat as OK. If None, then default to 64+32 for
ACS and 512+64 for WFC3/IR.
final_* : Parameters passed through to AstroDrizzle to define output WCS
Note that these are overridden if an exposure group has a 'reference'
keyword pointing to a reference image / WCS.
Returns
-------
Produces drizzled images.
"""
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.drizzle_overlaps')
from drizzlepac.astrodrizzle import AstroDrizzle
from shapely.geometry import Polygon
if parse_visits:
exposure_groups = utils.parse_visit_overlaps(exposure_groups, buffer=15.)
# Drizzle can only handle 999 files at a time
if check_overlaps:
for group in exposure_groups:
if 'reference' not in group:
continue
if 'footprints' in group:
footprints = group['footprints']
elif ('footprint' in group) & use_group_footprint:
footprints = [group['footprint']]*len(group['files'])
else:
                footprints = []
                files = group['files']
                for i in range(len(files)):
                    print(i, files[i])
                    im = pyfits.open(files[i])
                    p_i = None
                    for ext in [1, 2, 3, 4]:
                        if ('SCI', ext) in im:
                            wcs = pywcs.WCS(im['SCI', ext], fobj=im)
                            fp_x = wcs.calc_footprint()
                            if p_i is None:
                                p_i = Polygon(fp_x)
                            else:
                                p_i = p_i.union(Polygon(fp_x))
                    footprints.append(p_i)
ref = pyfits.getheader(group['reference'])
wcs = pywcs.WCS(ref)
ref_fp = Polygon(wcs.calc_footprint())
files = []
out_fp = []
if 'awspath' in group:
aws = []
for j in range(len(group['files'])):
olap = ref_fp.intersection(footprints[j])
if olap.area > 0:
files.append(group['files'][j])
if 'awspath' in group:
aws.append(group['awspath'][j])
out_fp.append(footprints[j])
print(group['product'], len(files), len(group['files']))
group['files'] = files
group['footprints'] = out_fp
if 'awspath' in group:
group['awspath'] = aws
# Download the file from aws. The 'awspath' entry
# is a list with the same length of 'files', and starts with
# the bucket name.
if 'awspath' in group:
import boto3
session = boto3.Session()
s3 = boto3.resource('s3')
bkt = None
for awspath, file in zip(group['awspath'], group['files']):
if os.path.exists(file):
continue
spl = awspath.split('/')
bucket_name = spl[0]
path_to_file = '/'.join(spl[1:])
if bkt is None:
bkt = s3.Bucket(bucket_name)
else:
if bkt.name != bucket_name:
bkt = s3.Bucket(bucket_name)
s3_file = (path_to_file+'/'+file).replace('//', '/')
print('Fetch from s3: s3://{0}/{1}'.format(bucket_name, s3_file))
bkt.download_file(s3_file, file,
ExtraArgs={"RequestPayer": "requester"})
if max_files > 0:
all_groups = []
for group in exposure_groups:
N = len(group['files']) // int(max_files) + 1
if N == 1:
all_groups.append(group)
else:
for k in range(N):
sli = slice(k*max_files, (k+1)*max_files)
files_list = group['files'][sli]
root = '{0}-{1:03d}'.format(group['product'], k)
g_k = OrderedDict(product=root,
files=files_list,
reference=group['reference'])
if 'footprints' in group:
g_k['footprints'] = group['footprints'][sli]
all_groups.append(g_k)
else:
all_groups = exposure_groups
for group in all_groups:
if len(group['files']) == 0:
continue
isACS = '_flc' in group['files'][0]
isWFPC2 = '_c0' in group['files'][0]
if (driz_cr_snr is None) | (driz_cr_scale is None):
if isACS:
driz_cr_snr = '3.5 3.0'
driz_cr_scale = '1.2 0.7'
elif isWFPC2:
driz_cr_snr = '3.5 3.0'
driz_cr_scale = '1.2 0.7'
else:
driz_cr_snr = '8.0 5.0'
driz_cr_scale = '2.5 0.7'
if driz_cr_snr_grow != 1:
spl = driz_cr_snr.split()
new_snr = np.cast[float](spl)*driz_cr_snr_grow
driz_cr_snr = ' '.join([f'{val:.2f}' for val in new_snr])
if driz_cr_scale_grow != 1:
spl = driz_cr_scale.split()
new_scale = np.cast[float](spl)*driz_cr_scale_grow
driz_cr_scale = ' '.join([f'{val:.2f}' for val in new_scale])
if bits is None:
if isACS | isWFPC2:
bits = 64+32
else:
bits = 576
if include_saturated:
bits |= 256
# All the same instrument?
inst_keys = np.unique([os.path.basename(file)[0] for file in group['files']])
print('\n\n### drizzle_overlaps: {0} ({1})\n'.format(group['product'],
len(group['files'])))
if fetch_flats:
# PFL files needed for IVM weights
for file in group['files']:
try:
if isWFPC2:
im = pyfits.open(file)
flat_file = im[0].header['FLATFILE'].strip('uref$')
utils.fetch_wfpc2_calib(file=flat_file,
path=os.getenv('uref'),
use_mast=False, verbose=True,
overwrite=True)
else:
utils.fetch_hst_calibs(file, calib_types=['PFLTFILE'],
verbose=False)
except:
utils.log_exception(utils.LOGFILE, traceback)
# Fetch files from aws
if 'reference' in group:
AstroDrizzle(group['files'], output=group['product'],
clean=True, context=context, preserve=False,
skysub=skysub, skyuser=skyuser, skymethod=skymethod,
driz_separate=run_driz_cr, driz_sep_wcs=run_driz_cr,
median=run_driz_cr, blot=run_driz_cr,
driz_cr=run_driz_cr,
driz_cr_snr=driz_cr_snr, driz_cr_scale=driz_cr_scale,
driz_cr_corr=False, driz_combine=True,
final_bits=bits, coeffs=True, build=build,
final_wht_type=final_wht_type,
final_wt_scl=final_wt_scl,
final_pixfrac=pixfrac,
final_wcs=True, final_refimage=group['reference'],
final_kernel=final_kernel,
resetbits=resetbits,
static=(static & (len(inst_keys) == 1)))
else:
AstroDrizzle(group['files'], output=group['product'],
clean=True, context=context, preserve=False,
skysub=skysub, skyuser=skyuser, skymethod=skymethod,
driz_separate=run_driz_cr, driz_sep_wcs=run_driz_cr,
median=run_driz_cr, blot=run_driz_cr,
driz_cr=run_driz_cr,
driz_cr_snr=driz_cr_snr, driz_cr_scale=driz_cr_scale,
driz_cr_corr=False, driz_combine=True,
final_bits=bits, coeffs=True, build=build,
final_wht_type=final_wht_type,
final_wt_scl=final_wt_scl,
final_pixfrac=pixfrac,
final_wcs=final_wcs, final_rot=final_rot,
final_scale=scale,
final_ra=final_ra, final_dec=final_dec,
final_outnx=final_outnx, final_outny=final_outny,
final_kernel=final_kernel,
resetbits=resetbits,
static=(static & (len(inst_keys) == 1)))
clean_drizzle(group['product'], fix_wcs_system=fix_wcs_system)
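# Illustrative sketch (hypothetical file names and footprints) of the overlap
# check applied when a group carries a 'reference' image: only exposures whose
# footprint polygon intersects the reference footprint are drizzled.
def _example_footprint_overlap_filter():
    from shapely.geometry import Polygon
    ref = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    exposures = {'exp_a_flt.fits': Polygon([(0.5, 0.5), (1.5, 0.5),
                                            (1.5, 1.5), (0.5, 1.5)]),
                 'exp_b_flt.fits': Polygon([(2, 2), (3, 2), (3, 3), (2, 3)])}
    keep = [f for f, fp in exposures.items() if ref.intersection(fp).area > 0]
    return keep                                  # ['exp_a_flt.fits']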
def manual_alignment(visit, ds9, reference=None, reference_catalogs=['SDSS', 'PS1', 'GAIA', 'WISE'], use_drz=False):
"""Manual alignment of a visit with respect to an external region file
Parameters
----------
visit : dict
List of visit information from `~grizli.utils.parse_flt_files`.
ds9 : `~grizli.ds9.DS9`
DS9 instance for interaction. Requires `~pyds9` and the extended
methods in `~grizli.ds9.DS9`.
reference : str
Filename of a DS9 region file that will be used as reference. If
None, then tries to find a local file based on the `visit['product']`.
reference_catalogs : list
If no valid `reference` file provided or found, query external
catalogs with `~grizli.prep.get_radec_catalog`. The external
catalogs will be queried in the order specified in this list.
Returns
-------
    Generates a file named ``{visit['product']}.align_guess`` that the
    alignment scripts know how to read.
.. note::
The alignment here is done interactively in the DS9 window. The script
prompts you to first center the frame on a source in the image itself,
which can be done in "panning" mode. After centering, hit <enter> in the
command line. The script will then prompt to center the frame on the
corresponding region from the reference file. After recentering, type
enter again and the output file will be computed and stored.
If you wish to break out of the script and not generate the output file,
type any character in the terminal at the first pause/prompt.
"""
import os
ref_image = os.path.join(os.getcwd(), '../RAW/', visit['files'][0])
files = glob.glob('{0}_dr?_sci.fits'.format(visit['product']))
ext = 1
if use_drz & (len(files) > 0):
ref_image = files[0]
ext = 0
im = pyfits.open(ref_image)
ra, dec = im[ext].header['CRVAL1'], im[ext].header['CRVAL2']
if reference is None:
reg_files = glob.glob('{0}_*reg'.format(visit['product']))
if len(reg_files) == 0:
get_radec_catalog(ra=ra, dec=dec, radius=3.,
product=visit['product'], verbose=True,
reference_catalogs=reference_catalogs,
date=im[0].header['EXPSTART'],
date_format='mjd')
reg_files = glob.glob('{0}_*reg'.format(visit['product']))
reference = os.path.join(os.getcwd(), reg_files[0])
print(visit['product'], reference)
#im = pyfits.open('{0}_drz_sci.fits'.format(visit['product']))
#ds9.view(im[1].data, header=im[1].header)
if '_c0' in im.filename():
ds9.set('file {0}[3]'.format(im.filename()))
else:
ds9.set('file {0}'.format(im.filename()))
ds9.set('regions file '+reference)
x = input('pan to object in image: ')
if x:
print('Input detected ({0}). Abort.'.format(x))
return False
x0 = np.cast[float](ds9.get('pan image').split())
x = input('pan to object in region: ')
x1 = np.cast[float](ds9.get('pan image').split())
print('Saved {0}.align_guess'.format(visit['product']))
dx = x0[0]-x1[0]
dy = x0[1]-x1[1]
if '_c0' in im.filename():
dx *= -1
        dy *= -1
    np.savetxt('{0}.align_guess'.format(visit['product']), [[dx, dy, 0, 1]], fmt='%s')
def extract_fits_log(file='idk106ckq_flt.fits', get_dq=True):
"""
not used
"""
log = OrderedDict()
im = pyfits.open(file)
for k in im[0].header:
if k in ['HISTORY', 'COMMENT', 'ORIGIN', '']:
continue
if k.strip().startswith('/'):
continue
log[k] = im[0].header[k]
log['chips'] = []
if get_dq:
idx = np.arange(1014**2, dtype=np.int32).reshape((1014, 1014))
for chip in [1, 2, 3, 4]:
key = 'SCI{0}'.format(chip)
if ('SCI', chip) in im:
log['chips'].append(chip)
log[key] = OrderedDict()
h = im['SCI', chip].header
for k in h:
if k in ['HISTORY', 'COMMENT', 'ORIGIN', '']:
continue
if k.strip().startswith('/'):
continue
log[key][k] = h[k]
if get_dq:
dq = im['DQ', chip].data
mask = dq > 0
log['DQi{0}'.format(chip)] = list(idx[mask].astype(str))
log['DQv{0}'.format(chip)] = list(dq[mask].astype(str))
return log
| gbrammer/grizli | grizli/prep.py | Python | mit | 197,852 | ["Gaussian", "VisIt"] | 5f608bf38bac710f7006370f509d1ebbad0379b208c59123d00b60e717511980 |
import os
import unittest
from os.path import join
from pymatgen.core.structure import Molecule
from pymatgen.io.adf import AdfInput, AdfKey, AdfOutput, AdfTask
from pymatgen.util.testing import PymatgenTest
__author__ = "Xin Chen, chenxin13@mails.tsinghua.edu.cn"
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules")
geometry_string = """GEOMETRY
smooth conservepoints
optim all cartesian
iterations 250
step rad=0.15 angle=10.0
hessupd BFGS
converge e=0.001 grad=0.0003 rad=0.01 angle=0.5
END
"""
zlmfit_string = """ZLMFIT
AtomDepQuality
10 good
12 normal
subend
END
"""
atoms_string = """ATOMS
O -0.90293455 0.66591421 0.00000000
H 0.05706545 0.66591421 0.00000000
H -1.22338913 1.57085004 0.00000000
END
"""
h2oxyz = """3
0.0
O -0.90293455 0.66591421 0.0
H 0.05706545 0.66591421 0.0
H -1.22338913 1.57085004 0.0
"""
rhb18xyz = """19
0.0
Rh -0.453396 -0.375115 0.000000
B 0.168139 3.232791 0.000000
B -0.270938 1.639058 0.000000
B 0.206283 2.604044 1.459430
B 0.404410 1.880136 2.866764
B -0.103309 0.887485 1.655272
B 0.436856 0.371367 3.299887
B 0.016593 -0.854959 1.930982
B 0.563233 -1.229713 3.453066
B 0.445855 -2.382027 2.415013
B 0.206283 2.604044 -1.459430
B 0.404410 1.880136 -2.866764
B -0.103309 0.887485 -1.655272
B 0.436856 0.371367 -3.299887
B 0.563233 -1.229713 -3.453066
B 0.016593 -0.854959 -1.930982
B 0.200456 -2.309538 -0.836316
B 0.200456 -2.309538 0.836316
B 0.445855 -2.382027 -2.415013
"""
def readfile(file_object):
"""
Return the content of the file as a string.
Parameters
----------
file_object : file or str
The file to read. This can be either a File object or a file path.
Returns
-------
content : str
The content of the file.
"""
if hasattr(file_object, "read"):
return file_object.read()
elif isinstance(file_object, str):
        with open(file_object, "r") as f:
            return f.read()
else:
raise ValueError("``file_object`` must be a string or a file object!")
class AdfKeyTest(unittest.TestCase):
def test_simple(self):
unrestricted = AdfKey("unrestricted")
self.assertEqual(str(unrestricted).strip(), "UNRESTRICTED")
def test_options(self):
charge = AdfKey("charge", [-1, 0])
charge_string = "CHARGE -1 0\n"
self.assertEqual(str(charge), "CHARGE -1 0\n")
self.assertEqual(str(AdfKey.from_dict(charge.as_dict())), charge_string)
def test_subkeys(self):
smooth = AdfKey("smooth", ["conservepoints"])
optim = AdfKey("optim", ["all", "cartesian"])
iterations = AdfKey("iterations", [250])
step = AdfKey("step", [("rad", 0.15), ("angle", 10.0)])
hessupd = AdfKey("hessupd", ["BFGS"])
converge = AdfKey(
"converge",
[("e", 1.0e-3), ("grad", 3.0e-4), ("rad", 1.0e-2), ("angle", 0.5)],
)
geo = AdfKey("geometry", subkeys=[smooth, optim, iterations, step, hessupd, converge])
self.assertEqual(str(geo), geometry_string)
self.assertEqual(str(AdfKey.from_dict(geo.as_dict())), geometry_string)
self.assertTrue(geo.has_subkey("optim"))
def test_end(self):
geo = AdfKey("Geometry")
self.assertEqual(str(geo), "GEOMETRY\nEND\n")
def test_subkeys_subkeys(self):
atom_dep_quality = AdfKey("AtomDepQuality", subkeys=[AdfKey("10", ["good"]), AdfKey("12", ["normal"])])
zlmfit = AdfKey("zlmfit", subkeys=[atom_dep_quality])
self.assertEqual(str(zlmfit), zlmfit_string)
self.assertEqual(str(AdfKey.from_dict(zlmfit.as_dict())), zlmfit_string)
def test_from_string(self):
k1 = AdfKey.from_string("CHARGE -1 0")
self.assertEqual(k1.key, "CHARGE")
self.assertListEqual(k1.options, [-1, 0])
k2 = AdfKey.from_string("step rad=0.15 angle=10.0")
self.assertEqual(k2.key, "step")
self.assertListEqual(k2.options[0], ["rad", 0.15])
self.assertListEqual(k2.options[1], ["angle", 10.0])
k3 = AdfKey.from_string("GEOMETRY\noptim all\niterations 100\nEND\n")
self.assertEqual(k3.key, "GEOMETRY")
self.assertEqual(k3.subkeys[0].options[0], "all")
self.assertEqual(k3.subkeys[1].options[0], 100)
k4 = AdfKey.from_string(
"""SCF
iterations 300
converge 1.0e-7 1.0e-7
mixing 0.2
diis n=100 ok=0.0001 cyc=100 cx=5.0 cxx=10.0
END"""
)
self.assertEqual(k4.key, "SCF")
self.assertEqual(k4.subkeys[0].key, "iterations")
self.assertEqual(k4.subkeys[1].key, "converge")
self.assertEqual(k4.subkeys[1].options[0], 1e-7)
self.assertEqual(k4.subkeys[2].options[0], 0.2)
def test_option_operations(self):
k1 = AdfKey("Charge", [-1, 0])
k1.add_option(2)
self.assertListEqual(k1.options, [-1, 0, 2])
k1.remove_option(0)
self.assertListEqual(k1.options, [0, 2])
k2 = AdfKey.from_string("step rad=0.15 angle=10.0")
k2.add_option(["length", 0.1])
self.assertListEqual(k2.options[2], ["length", 0.1])
k2.remove_option("rad")
self.assertListEqual(k2.options[0], ["angle", 10.0])
def test_atom_block_key(self):
block = AdfKey("atoms")
o = Molecule.from_str(h2oxyz, "xyz")
for site in o:
block.add_subkey(AdfKey(str(site.specie), list(site.coords)))
self.assertEqual(str(block), atoms_string)
energy_task = """TITLE ADF_RUN
UNITS
length angstrom
angle degree
END
XC
GGA PBE
END
BASIS
type DZ
core small
END
SCF
iterations 300
END
GEOMETRY SinglePoint
END
"""
class AdfTaskTest(unittest.TestCase):
def test_energy(self):
task = AdfTask()
self.assertEqual(str(task), energy_task)
def test_serialization(self):
task = AdfTask()
o = AdfTask.from_dict(task.as_dict())
self.assertEqual(task.title, o.title)
self.assertEqual(task.basis_set, o.basis_set)
self.assertEqual(task.scf, o.scf)
self.assertEqual(task.geo, o.geo)
self.assertEqual(task.operation, o.operation)
self.assertEqual(task.units, o.units)
self.assertEqual(str(task), str(o))
rhb18 = {
"title": "RhB18",
"basis_set": AdfKey.from_string("BASIS\ntype TZP\ncore small\nEND"),
"xc": AdfKey.from_string("XC\nHybrid PBE0\nEND"),
"units": AdfKey.from_string("UNITS\nlength angstrom\nEND"),
"other_directives": [
AdfKey.from_string("SYMMETRY"),
AdfKey.from_string("RELATIVISTIC scalar zora"),
AdfKey.from_string("INTEGRATION 6.0 6.0 6.0"),
AdfKey.from_string("SAVE TAPE21"),
AdfKey.from_string("A1FIT 10.0"),
],
"geo_subkeys": [
AdfKey.from_string("optim all"),
AdfKey.from_string("iterations 300"),
AdfKey.from_string("step rad=0.15 angle=10.0"),
AdfKey.from_string("hessupd BFGS"),
],
"scf": AdfKey.from_string(
"""SCF
iterations 300
converge 1.0e-7 1.0e-7
mixing 0.2
lshift 0.0
diis n=100 ok=0.0001 cyc=100 cx=5.0 cxx=10.0
END"""
),
}
class AdfInputTest(unittest.TestCase):
def setUp(self):
self.tempfile = "./adf.temp"
def test_main(self):
o = Molecule.from_str(rhb18xyz, "xyz")
o.set_charge_and_spin(-1, 3)
task = AdfTask("optimize", **rhb18)
inp = AdfInput(task)
inp.write_file(o, self.tempfile)
s = readfile(join(test_dir, "adf", "RhB18_adf.inp"))
self.assertEqual(readfile(self.tempfile), s)
def tearDown(self):
if os.path.isfile(self.tempfile):
os.remove(self.tempfile)
class AdfOutputTest(unittest.TestCase):
def test_analytical_freq(self):
filename = join(test_dir, "adf", "analytical_freq", "adf.out")
o = AdfOutput(filename)
self.assertAlmostEqual(o.final_energy, -0.54340325)
self.assertEqual(len(o.energies), 4)
self.assertEqual(len(o.structures), 4)
self.assertAlmostEqual(o.frequencies[0], 1553.931)
self.assertAlmostEqual(o.frequencies[2], 3793.086)
self.assertAlmostEqual(o.normal_modes[0][2], 0.071)
self.assertAlmostEqual(o.normal_modes[0][6], 0.000)
self.assertAlmostEqual(o.normal_modes[0][7], -0.426)
self.assertAlmostEqual(o.normal_modes[0][8], -0.562)
def test_numerical_freq(self):
filename = join(test_dir, "adf", "numerical_freq", "adf.out")
o = AdfOutput(filename)
self.assertEqual(o.freq_type, "Numerical")
self.assertEqual(o.final_structure.num_sites, 4)
self.assertEqual(len(o.frequencies), 6)
self.assertEqual(len(o.normal_modes), 6)
self.assertAlmostEqual(o.frequencies[0], 938.21)
self.assertAlmostEqual(o.frequencies[3], 3426.64)
self.assertAlmostEqual(o.frequencies[4], 3559.35)
self.assertAlmostEqual(o.frequencies[5], 3559.35)
self.assertAlmostEqual(o.normal_modes[1][0], 0.067)
self.assertAlmostEqual(o.normal_modes[1][3], -0.536)
self.assertAlmostEqual(o.normal_modes[1][7], 0.000)
self.assertAlmostEqual(o.normal_modes[1][9], -0.536)
def test_single_point(self):
filename = join(test_dir, "adf", "sp", "adf.out")
o = AdfOutput(filename)
self.assertAlmostEqual(o.final_energy, -0.74399276)
self.assertEqual(len(o.final_structure), 4)
if __name__ == "__main__":
unittest.main()
| gmatteo/pymatgen | pymatgen/io/tests/test_adf.py | Python | mit | 9,858 | ["ADF", "pymatgen"] | 632820d1d5b8382dac967f1792ed256b7085fbb21a4580ef1d718c91f21d536e |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import ast
import codecs
import os
import shutil
from collections import OrderedDict
from six import string_types
from sphinx_explorer.plugin import extension
from sphinx_explorer.plugin.extension import Extension
CONF_PY_ENCODING = "utf-8"
# memo:
# ast -> code module:
# https://github.com/simonpercivall/astunparse
# noinspection PyMethodMayBeStatic
class MyNodeVisitor(ast.NodeVisitor):
def __init__(self, source_lines, replace_dict=None):
# type: ([string_types], dict) -> None
super(MyNodeVisitor, self).__init__()
self._source_lines = source_lines[:]
self.replace_dict = replace_dict or {}
self._params_map = OrderedDict()
def visit_Assign(self, node):
# type: (ast.Assign) -> ast.Assign
if len(node.targets) == 1:
if isinstance(node.targets[0], ast.Subscript):
return node
left_name = node.targets[0].id
if left_name in self.replace_dict:
new_line = "{} = {}\n".format(left_name, repr(self.replace_dict[left_name]))
self._source_lines[node.lineno - 1] = new_line
if isinstance(node.value, ast.Str):
self._params_map[left_name] = node.value.s
return node
def params(self):
return self._params_map
def visit(self, tree):
super(MyNodeVisitor, self).visit(tree)
return self._source_lines
class Parser(object):
def __init__(self, conf_path):
self._source = []
self._params = {}
# noinspection PyBroadException
try:
for line in codecs.open(conf_path, "r", CONF_PY_ENCODING).readlines():
self._source.append(line)
self._tree = ast.parse(open(conf_path, "r").read())
except:
pass
def add_sys_path(self, path_list):
if not path_list:
return
insert_sys_flag = False
new_lines = []
for line in self._source:
if line.strip() == "# import os":
new_lines.append("import os\n")
elif line.strip() == "# import sys":
new_lines.append("import sys\n")
elif "sys.path.insert" in line and insert_sys_flag is False:
insert_sys_flag = True
new_lines.append(line)
for path in path_list:
path = path.replace("\\", "\\\\")
new_lines.append("sys.path.insert(0, os.path.abspath('{}'))\n".format(path))
else:
new_lines.append(line)
self._source = new_lines
self._tree = ast.parse(self.dumps().encode(CONF_PY_ENCODING))
def assign_replace(self, replace_dict):
self._source = MyNodeVisitor(self._source, replace_dict).visit(self._tree)
return self._source
def append(self, line):
self._source.append(line)
def dumps(self):
return "".join(self._source)
def params(self):
if self._params:
return self._params
else:
# noinspection PyBroadException
try:
parser = MyNodeVisitor(self._source)
parser.visit(self._tree)
self._params = parser.params()
return parser.params()
except:
return {}
@property
def lines(self):
return self._source
def comment(parser, name):
comment_str = "# -- {} ".format(name)
comment_str += "-" * (75 - len(comment_str))
parser.append("\n")
parser.append("\n")
parser.append(comment_str)
parser.append("\n")
def extend_conf_py(conf_py_path, params, settings, extensions=None, insert_paths=None):
extensions = extensions or []
if os.path.isfile(conf_py_path):
parser = Parser(conf_py_path)
conf_py_dir_path = os.path.dirname(conf_py_path)
local_extension_path = os.path.join(conf_py_dir_path, "extension")
if params:
parser.assign_replace(params)
parser.add_sys_path(insert_paths)
for key in extensions:
if key.startswith("ext-") and params.get(key, False):
ext = extension.get(key) # type: Extension
if ext:
if ext.local_extension:
parser.add_sys_path(["./extension"])
src_path = ext.local_extension_file
dst_path = os.path.join(local_extension_path, os.path.basename(src_path))
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
shutil.copy(src_path, dst_path)
comment(parser, ext.name)
parser.append(ext.generate_py_script(params, settings))
parser.append("\n")
# generate code
comment(parser, "Sphinx Explorer")
        parser.append(config_code)
parser.append("\n")
with codecs.open(conf_py_path, "w", CONF_PY_ENCODING) as fd:
fd.write(parser.dumps())
config_code = """
import json
try:
conf_dict = json.load(open("conf.json"))
for key, value in conf_dict.items():
globals()[key] = value
except:
pass
""".strip()
| pashango2/sphinx-explorer | sphinx_explorer/util/conf_py_parser.py | Python | mit | 5,427 | ["VisIt"] | f471efeb1042f4292d8370766e42017901eb58bec7ceaab2b99336e56e255f1c |
import numpy
import pdb
import numpy as Math
class DataGenerator(object):
def __init__(self, batch_size):
self.batch_size = batch_size
def fit(self, inputs, targets):
self.start = 0
self.inputs = inputs
self.targets = targets
def __next__(self):
return self.next()
def reset(self):
self.start = 0
def next(self):
if self.start < len(self.inputs):
input_ = self.inputs
target_ = self.targets
output1 = target_[self.start:(self.start + self.batch_size)]
output2 = input_[self.start:(self.start + self.batch_size)]
self.start += self.batch_size
return (output1, output2)
else:
self.reset()
return self.next()
#
# Implementation of t-SNE in Python taken from tsne.py
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
#
def Hbeta(D = Math.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
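    # Note: with p_j = exp(-beta * D_j) / sumP, the Shannon entropy
    # H = -sum_j p_j * log(p_j) simplifies to log(sumP) + beta * sum_j D_j * p_j,
    # which is the expression computed below; the perplexity is exp(H).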
P = Math.exp(-D.copy() * beta);
sumP = sum(P);
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP;
P = P / sumP;
return H, P;
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print "Computing pairwise distances..."
(n, d) = X.shape;
sum_X = Math.sum(Math.square(X), 1);
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
P = Math.zeros((n, n));
beta = Math.ones((n, 1));
logU = Math.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print "Computing P-values for point ", i, " of ", n, "..."
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf;
betamax = Math.inf;
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy();
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i].copy();
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
# Return final P-matrix
print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta));
return P;
def pca(X = Math.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print "Preprocessing the data using PCA..."
(n, d) = X.shape;
X = X - Math.tile(Math.mean(X, 0), (n, 1));
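    # Eigendecomposition of the (unnormalised) covariance matrix X^T X.
    # numpy's eig does not sort the eigenvalues, so the first no_dims columns
    # of M are taken in whatever order eig returns them, as in the reference
    # tsne.py implementation.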
(l, M) = Math.linalg.eig(Math.dot(X.T, X));
Y = Math.dot(X, M[:,0:no_dims]);
return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
    The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if isinstance(no_dims, float):
print "Error: array X should have type float.";
return -1;
if round(no_dims) != no_dims:
print "Error: number of dimensions should be an integer.";
return -1;
# Initialize variables
X = pca(X, initial_dims).real;
(n, d) = X.shape;
max_iter = 1000;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = Math.random.randn(n, no_dims);
dY = Math.zeros((n, no_dims));
iY = Math.zeros((n, no_dims));
gains = Math.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + Math.transpose(P);
P = P / Math.sum(P);
P = P * 4; # early exaggeration
P = Math.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1);
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / Math.sum(num);
Q = Math.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
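        # Adaptive per-element gains (delta-bar-delta style): increase the gain
        # where the current gradient disagrees in sign with the previous update,
        # shrink it where they agree, then clip from below at min_gain.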
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = Math.sum(P * Math.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
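# A minimal usage sketch (Python 2, matching this module); the input array here
# is synthetic and purely illustrative:
#
#   X = Math.random.randn(500, 50)
#   Y = tsne(X, no_dims=2, initial_dims=50, perplexity=30.0)
#   # Y is a 500 x 2 embedding suitable for a scatter plot.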
| MdAsifKhan/DNGR-Keras | utils.py | Python | mit | 5,301 | ["Gaussian"] | a3aad97c97c3193dab0edaead9be5e8ee70a52dcd307b15eb1fcaee65da83817 |
#
# Copyright (C) 2017-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import numpy as np
import pickle
import espressomd
import espressomd.observables
import espressomd.accumulators
N_PART = 4
class MeanVarianceCalculatorTest(ut.TestCase):
"""
Test class for the MeanVarianceCalculator accumulator.
"""
system = espressomd.System(box_l=[10.0] * 3)
system.cell_system.skin = 0.4
system.time_step = 0.01
def setUp(self):
np.random.seed(seed=42)
def tearDown(self):
self.system.part.clear()
self.system.auto_update_accumulators.clear()
def test_accumulator(self):
"""Check that accumulator results are the same as the respective numpy result.
"""
system = self.system
system.part.add(pos=np.zeros((N_PART, 3)))
obs = espressomd.observables.ParticlePositions(ids=range(N_PART))
acc = espressomd.accumulators.MeanVarianceCalculator(obs=obs)
system.auto_update_accumulators.add(acc)
positions = np.copy(system.box_l) * np.random.random((10, N_PART, 3))
for pos in positions:
system.part[:].pos = pos
acc.update()
self.assertEqual(acc.get_params()['obs'], obs)
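        # Reference statistics computed directly with numpy: sample variance with
        # ddof=1 and the standard error of the mean as sqrt(variance / n_samples).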
pos_mean = np.mean(positions, axis=0)
pos_var = np.var(positions, axis=0, ddof=1)
pos_sem = np.sqrt(pos_var / len(positions))
np.testing.assert_allclose(acc.mean(), pos_mean, atol=1e-12)
np.testing.assert_allclose(acc.variance(), pos_var, atol=1e-12)
np.testing.assert_allclose(acc.std_error(), pos_sem, atol=1e-12)
# Check pickling
acc_unpkl = pickle.loads(pickle.dumps(acc))
np.testing.assert_allclose(acc_unpkl.mean(), pos_mean, atol=1e-12)
np.testing.assert_allclose(acc_unpkl.variance(), pos_var, atol=1e-12)
np.testing.assert_allclose(acc_unpkl.std_error(), pos_sem, atol=1e-12)
if __name__ == "__main__":
ut.main()
| fweik/espresso | testsuite/python/accumulator_mean_variance.py | Python | gpl-3.0 | 2,616 | ["ESPResSo"] | fdb10e87abc6b7d39f4609d5bb3eb9c106f7f124834502c79d7cdad582b624d1 |
# -*- coding: utf-8 -*-
#
# gif_population.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Population of GIF neuron model with oscillatory behavior
-------------------------------------------------------------
This script simulates a population of generalized integrate-and-fire (GIF)
model neurons driven by noise from a group of Poisson generators.
Due to spike-frequency adaptation, the GIF neurons tend to show oscillatory
behavior on the time scale comparable with the time constant of adaptation
elements (stc and sfa).
Population dynamics are visualized by raster plot and as average firing rate.
References
~~~~~~~~~~~
.. [1] Schwalger T, Degert M, Gerstner W (2017). Towards a theory of cortical columns: From spiking
neurons to interacting neural populations of finite size. PLoS Comput Biol.
https://doi.org/10.1371/journal.pcbi.1005507
.. [2] Mensi S, Naud R, Pozzorini C, Avermann M, Petersen CC and
Gerstner W (2012). Parameter extraction and classification of
three cortical neuron types reveals two distinct adaptation
mechanisms. Journal of Neurophysiology. 107(6), pp.1756-1775.
"""
###############################################################################
# Import all necessary modules for simulation and plotting.
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1
simtime = 2000.0
###############################################################################
# Definition of neural parameters for the GIF model. These parameters are
# extracted by fitting the model to experimental data [2]_.
neuron_params = {"C_m": 83.1,
"g_L": 3.7,
"E_L": -67.0,
"Delta_V": 1.4,
"V_T_star": -39.6,
"t_ref": 4.0,
"V_reset": -36.7,
"lambda_0": 1.0,
"q_stc": [56.7, -6.9],
"tau_stc": [57.8, 218.2],
"q_sfa": [11.7, 1.8],
"tau_sfa": [53.8, 640.0],
"tau_syn_ex": 10.0,
}
###############################################################################
# Definition of the parameters for the population of GIF neurons.
N_ex = 100 # size of the population
p_ex = 0.3 # connection probability inside the population
w_ex = 30.0 # synaptic weights inside the population (pA)
###############################################################################
# Definition of the parameters for the Poisson group and its connection with
# GIF neurons population.
N_noise = 50 # size of Poisson group
rate_noise = 10.0 # firing rate of Poisson neurons (Hz)
w_noise = 20.0 # synaptic weights from Poisson to population neurons (pA)
###############################################################################
# Configuration of the simulation kernel with the previously defined time
# resolution.
nest.SetKernelStatus({"resolution": dt})
###############################################################################
# Building a population of GIF neurons, a group of Poisson neurons and a
# spike detector device for capturing spike times of the population.
population = nest.Create("gif_psc_exp", N_ex, params=neuron_params)
noise = nest.Create("poisson_generator", N_noise, params={'rate': rate_noise})
spike_det = nest.Create("spike_detector")
###############################################################################
# Build connections inside the population of GIF neurons population, between
# Poisson group and the population, and also connecting spike detector to
# the population.
nest.Connect(
population, population, {'rule': 'pairwise_bernoulli', 'p': p_ex},
syn_spec={"weight": w_ex}
)
nest.Connect(noise, population, 'all_to_all', syn_spec={"weight": w_noise})
nest.Connect(population, spike_det)
###############################################################################
# Simulation of the network.
nest.Simulate(simtime)
###############################################################################
# Plotting the results of simulation including raster plot and histogram of
# population activity.
nest.raster_plot.from_device(spike_det, hist=True)
plt.title('Population dynamics')
nest.raster_plot.show()
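###############################################################################
# As a complementary check, the population-averaged firing rate can be
# estimated directly from the recorded spikes (a minimal sketch; it assumes the
# NEST 2.x ``spike_detector`` created above, whose ``n_events`` counts all
# recorded spikes).

n_spikes = nest.GetStatus(spike_det, 'n_events')[0]
average_rate = n_spikes / (N_ex * simtime / 1000.0)
print('Population-averaged firing rate: {:.2f} spikes/s'.format(average_rate))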
| alberto-antonietti/nest-simulator | pynest/examples/gif_population.py | Python | gpl-2.0 | 5,063 | ["NEURON"] | bd99932e444dda7c7611a718c318fc123422ed142cc142fc5b46b4dda5cce390 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Module for testing the initialisation."""
import tempfile
from pathlib import Path
import hoomd
import numpy as np
import pytest
from hypothesis import given, settings
from hypothesis.strategies import floats, integers, tuples
from statdyn import crystals, molecules
from statdyn.simulation import initialise
from statdyn.simulation.helper import SimulationParams
from .crystal_test import get_distance
def create_snapshot():
"""Easily create a snapshot for later use in testing."""
return initialise.init_from_none(hoomd_args='', cell_dimensions=(10, 10))
def create_file():
"""Ease of use function for creating a file for use in testing."""
initialise.init_from_none(hoomd_args='')
with tempfile.NamedTemporaryFile(delete=False) as tmp:
hoomd.dump.gsd(
tmp.name,
period=None,
overwrite=True,
group=hoomd.group.all()
)
return Path(tmp.name)
PARAMETERS = SimulationParams(
temperature=0.4,
num_steps=100,
crystal=crystals.TrimerPg(),
outfile_path=Path('test/tmp'),
cell_dimensions=(10, 10),
)
INIT_TEST_PARAMS = [
(initialise.init_from_none, ''),
(initialise.init_from_file, [create_file(), '']),
(initialise.init_from_crystal, [PARAMETERS]),
]
def test_init_from_none():
"""Ensure init_from_none has the correct type and number of particles."""
snap = initialise.init_from_none(cell_dimensions=(10, 10))
assert snap.particles.N == 100
def test_initialise_snapshot():
"""Test initialisation from a snapshot works."""
initialise.initialise_snapshot(
create_snapshot(),
hoomd.context.initialize(''),
molecules.Trimer(),
)
assert True
@pytest.mark.parametrize("init_func, args", INIT_TEST_PARAMS)
def test_init_all(init_func, args):
"""Test the initialisation of all init functions."""
if args:
init_func(*args)
else:
init_func()
assert True
@pytest.mark.parametrize("init_func, args", INIT_TEST_PARAMS)
def test_2d(init_func, args):
"""Test box is 2d when initialised."""
if args:
sys = init_func(*args)
else:
sys = init_func()
assert sys.box.dimensions == 2
def test_orthorhombic_null():
"""Ensure null operation with orthorhombic function.
In the case where the unit cell is already orthorhombic,
check that nothing has changed unexpectedly.
"""
with hoomd.context.initialize():
snap = create_snapshot()
assert np.all(
initialise.make_orthorhombic(snap).particles.position ==
snap.particles.position)
assert snap.box.xy == 0
assert snap.box.xz == 0
assert snap.box.yz == 0
@given(tuples(integers(max_value=30, min_value=5),
integers(max_value=30, min_value=5)))
@settings(max_examples=10, deadline=None)
def test_make_orthorhombic(cell_dimensions):
"""Ensure that a conversion to an orthorhombic cell goes smoothly.
This tests a number of modes of operation
- nothing changes in an already orthorhombic cell
- no particles are outside the box when moved
- the box is actually orthorhombic
"""
with hoomd.context.initialize():
snap_crys = hoomd.init.create_lattice(
unitcell=crystals.TrimerP2().get_unitcell(),
n=cell_dimensions
).take_snapshot()
snap_ortho = initialise.make_orthorhombic(snap_crys)
assert np.all(
snap_ortho.particles.position[:, 0] < snap_ortho.box.Lx / 2.)
assert np.all(
snap_ortho.particles.position[:, 0] > -snap_ortho.box.Lx / 2.)
assert np.all(
snap_ortho.particles.position[:, 1] < snap_ortho.box.Ly / 2.)
assert np.all(
snap_ortho.particles.position[:, 1] > -snap_ortho.box.Ly / 2.)
assert snap_ortho.box.xy == 0
assert snap_ortho.box.xz == 0
assert snap_ortho.box.yz == 0
@given(tuples(integers(max_value=30, min_value=5),
integers(max_value=30, min_value=5)))
@settings(max_examples=10, deadline=None)
def test_orthorhombic_init(cell_dimensions):
"""Ensure orthorhombic cell initialises correctly."""
snap = initialise.init_from_crystal(PARAMETERS)
snap_ortho = initialise.make_orthorhombic(snap)
assert np.all(snap_ortho.particles.position ==
snap.particles.position)
assert np.all(
snap_ortho.particles.position[:, 0] < snap_ortho.box.Lx / 2.)
assert np.all(
snap_ortho.particles.position[:, 0] > -snap_ortho.box.Lx / 2.)
assert np.all(
snap_ortho.particles.position[:, 1] < snap_ortho.box.Ly / 2.)
assert np.all(
snap_ortho.particles.position[:, 1] > -snap_ortho.box.Ly / 2.)
assert snap_ortho.box.xy == 0
assert snap_ortho.box.xz == 0
assert snap_ortho.box.yz == 0
for i in snap.particles.position:
assert np.sum(get_distance(i, snap.particles.position, snap.box) < 1.1) <= 3
@given(floats(min_value=0.1, allow_infinity=False, allow_nan=False))
@settings(deadline=None)
def test_moment_inertia(scaling_factor):
"""Ensure moment of intertia is set correctly in setup."""
init_mol = molecules.Trimer(moment_inertia_scale=scaling_factor)
snapshot = initialise.initialise_snapshot(
create_snapshot(),
hoomd.context.initialize(''),
init_mol,
).take_snapshot()
nmols = max(snapshot.particles.body) + 1
assert np.allclose(snapshot.particles.moment_inertia[:nmols],
np.array(init_mol.moment_inertia).astype(np.float32))
| malramsay64/MD-Molecules-Hoomd | test/initialise_test.py | Python | mit | 5,775 | ["CRYSTAL"] | 7976397ebf2f1cefad955abd0493fdd06377978a2457a943a466d44fd71522db |
# trbchan.py ---
#
# Filename: trbchan.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri May 4 14:55:52 2012 (+0530)
# Version:
# Last-Updated: Sun Jun 25 10:08:35 2017 (-0400)
# By: subha
# Update #: 347
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Base class for channels in Traub model.
#
#
# Change log:
#
# 2012-05-04 14:55:56 (+0530) subha started porting code from
# channel.py in old moose version to dh_branch.
#
# Code:
from warnings import warn
import numpy as np
import moose
import config
import metafix
vmin = -120e-3
vmax = 40e-3
vdivs = 640
v_array = np.linspace(vmin, vmax, vdivs+1)
ca_min = 0.0
ca_max = 1000.0
ca_divs = 1000
ca_conc = np.linspace(ca_min, ca_max, ca_divs + 1)
prototypes = {}
def setup_gate_tables(gate, param_dict, bases):
suffix = None
if gate.name == 'gateX':
suffix = 'x'
elif gate.name == 'gateY':
suffix = 'y'
elif gate.name == 'gateZ':
suffix = 'z'
else:
raise NameError('Gate in a channel must have names in [`gateX`, `gateY`, `gateZ`]')
if suffix in ['x', 'y']:
gate.min = vmin
gate.max = vmax
gate.divs = vdivs
else:
gate.min = ca_min
gate.max = ca_max
gate.divs = ca_divs
gate.useInterpolation = True
keys = ['%s_%s' % (key, suffix) for key in ['tau', 'inf', 'alpha', 'beta', 'tableA', 'tableB']]
msg = ''
if keys[0] in param_dict:
msg = 'Using tau/inf tables'
gate.tableA = param_dict[keys[1]] / param_dict[keys[0]]
gate.tableB = 1 / param_dict[keys[0]]
elif keys[2] in param_dict:
msg = 'Using alpha/beta tables'
gate.tableA = param_dict[keys[2]]
gate.tableB = param_dict[keys[2]] + param_dict[keys[3]]
elif keys[4] in param_dict:
msg = 'Using A/B tables'
gate.tableA = param_dict[keys[4]]
gate.tableB = param_dict[keys[5]]
else:
for base in bases:
new_bases = base.mro()
new_param_dict = base.__dict__
if new_bases:
new_bases = new_bases[1:]
if setup_gate_tables(gate, new_param_dict, new_bases):
msg = 'Gate setup in baseclass: '+base.__class__.__name__
break
if msg:
config.logger.debug('%s: %s' % (gate.path, msg))
return True
else:
config.logger.debug('%s: nothing was setup for this gate' % (gate.path))
return False
def get_class_field(name, cdict, bases, fieldname, default=None):
if fieldname in cdict:
config.logger.debug('%s: %s=%s' % (name, fieldname, str(cdict[fieldname])))
return cdict[fieldname]
else:
for base in bases:
if hasattr(base, fieldname):
return getattr(base, fieldname)
# warn('field %s not in the hierarchy of %s class. Returning default value.' % (fieldname, name))
return default
class ChannelMeta(type):
def __new__(cls, name, bases, cdict):
global prototypes
        # Classes that set abstract=True will be abstract classes.
        # Others will have their prototype instantiated.
if 'abstract' in cdict and cdict['abstract'] == True:
return type.__new__(cls, name, bases, cdict)
proto = moose.HHChannel('%s/%s' % (config.library.path, name))
xpower = get_class_field(name, cdict, bases, 'Xpower', default=0.0)
if xpower > 0:
proto.Xpower = xpower
gate = moose.HHGate('%s/gateX' % (proto.path))
setup_gate_tables(gate, cdict, bases)
cdict['xGate'] = gate
ypower = get_class_field(name, cdict, bases, 'Ypower', default=0.0)
if ypower > 0:
proto.Ypower = ypower
gate = moose.HHGate('%s/gateY' % (proto.path))
setup_gate_tables(gate, cdict, bases)
cdict['yGate'] = gate
zpower = get_class_field(name, cdict, bases, 'Zpower', default=0.0)
if zpower > 0:
proto.Zpower = zpower
gate = moose.HHGate('%s/gateZ' % (proto.path))
setup_gate_tables(gate, cdict, bases)
cdict['zGate'] = gate
ca_msg_field = moose.Mstring('%s/addmsg1' % (proto.path))
ca_msg_field.value = '../CaPool concOut . concen'
proto.instant = get_class_field(name, cdict, bases, 'instant', default=0)
proto.useConcentration = True
proto.Ek = get_class_field(name, cdict, bases, 'Ek', default=0.0)
X = get_class_field(name, cdict, bases, 'X')
if X is not None:
proto.X = X
Y = get_class_field(name, cdict, bases, 'Y')
if Y is not None:
proto.Y = Y
Z = get_class_field(name, cdict, bases, 'Z')
if Z is not None:
proto.Z = Z
mstring_field = get_class_field(name, cdict, bases, 'mstring')
if mstring_field is not None:
# print 'mstring_field:', mstring_field
mstring = moose.Mstring('%s/%s' % (proto.path, mstring_field[0]))
mstring.value = mstring_field[1]
if 'annotation' in cdict:
info = moose.Annotator('%s/info' % (proto.path))
info.notes = '\n'.join('%s: %s' % kv for kv in list(cdict['annotation'].items()))
# print proto.path, info.notes
cdict['prototype'] = proto
prototypes[name] = proto
config.logger.info('Created prototype: %s of class %s' % (proto.path, name))
return type.__new__(cls, name, bases, cdict)
@metafix.with_metaclass(ChannelMeta)
class ChannelBase(moose.HHChannel):
# __metaclass__ = ChannelMeta # This is incompatible with Python3
annotation = {'cno': 'cno_0000047'}
abstract = True
def __init__(self, path, xpower=1, ypower=0, Ek=0.0):
moose.HHChannel.__init__(self, path)
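# A minimal, illustrative subclass (hypothetical parameters, not taken from the
# Traub model) showing how ChannelMeta consumes class attributes: ``Xpower``
# activates the X gate, and ``inf_x``/``tau_x`` arrays sampled on ``v_array``
# fill the gate's interpolation tables in the prototype created under
# ``config.library``:
#
#   class ExampleKChannel(ChannelBase):
#       Xpower = 4
#       Ek = -95e-3
#       inf_x = 1.0 / (1.0 + np.exp(-(v_array + 30e-3) / 10e-3))
#       tau_x = 5e-3 * np.ones(v_array.shape)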
#
# trbchan.py ends here
| BhallaLab/moose | moose-examples/traub_2005/py/channelbase.py | Python | gpl-3.0 | 5,926 | ["MOOSE"] | bc41dd62439ad3ffd6f0ab9364dc0c360613fc0225f92207aba94ec661e2cc94 |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.utils import Testing
import os
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
qapp = QtWidgets.QApplication([])
def setUp(self):
super(Tests, self).setUp()
self.oversample_filename = "check_oversample.png"
self.pressure_filename = "pressure.png"
self.globals_filename = "globals.png"
Testing.remove_file(self.oversample_filename)
Testing.remove_file(self.pressure_filename)
Testing.remove_file(self.globals_filename)
Testing.remove_file("with_date.e")
Testing.remove_file("over.e")
Testing.clean_files()
def checkInputFile(self, input_file, image_name, exe_path=None, cwd=None):
Testing.remove_file(image_name)
args = ["-w"]
if cwd:
args.append(cwd)
args.append(os.path.join(cwd, input_file))
else:
args.append(self.starting_directory)
args.append(input_file)
if exe_path:
args.append(exe_path)
else:
args.append(Testing.find_moose_test_exe())
app = self.createPeacockApp(args)
result_plugin = app.main_widget.tab_plugin.ExodusViewer
exe_plugin = app.main_widget.tab_plugin.ExecuteTabPlugin
vtkwin = result_plugin.currentWidget().VTKWindowPlugin
app.main_widget.setTab(result_plugin.tabName())
Testing.set_window_size(vtkwin)
Testing.remove_file("peacock_run_exe_tmp_out.e")
exe_plugin.ExecuteRunnerPlugin.runClicked()
# make sure we are finished
while not self.finished:
self.qapp.processEvents()
Testing.process_events(t=3)
Testing.set_window_size(vtkwin)
vtkwin.onWrite(image_name)
self.assertFalse(Testing.gold_diff(image_name))
def testOversample(self):
self.checkInputFile("../../common/oversample.i", self.oversample_filename)
def testDate(self):
self.checkInputFile("../../common/transient_with_date.i", self.oversample_filename)
def testPressure(self):
"""
There was a problem processing pressure_test.i input file. "type" was incorrectly getting
output on /BCs/Pressure causing a failure.
Make sure it is fixed.
"""
image_name = os.path.abspath(self.pressure_filename)
with Testing.remember_cwd():
pressure_dir = os.path.join(os.environ["MOOSE_DIR"], "modules", "tensor_mechanics", "test", "tests", "pressure")
exe = Testing.find_moose_test_exe("modules/combined", "combined")
self.checkInputFile("pressure_test.i", image_name, exe_path=exe, cwd=pressure_dir)
def testGlobalParams(self):
"""
Issue #127
Global params wasn't being read.
Make sure it is fixed.
"""
image_name = os.path.abspath(self.globals_filename)
with Testing.remember_cwd():
reconstruct_dir = os.path.join(os.environ["MOOSE_DIR"], "modules", "phase_field", "test", "tests", "reconstruction")
exe = Testing.find_moose_test_exe("modules/combined", "combined")
self.checkInputFile("2phase_reconstruction2.i", image_name, exe_path=exe, cwd=reconstruct_dir)
if __name__ == '__main__':
Testing.run_tests()
| nuclear-wizard/moose | python/peacock/tests/peacock_app/results_output/test_results_output.py | Python | lgpl-2.1 | 3,607 | ["MOOSE"] | 5c1a941d6f1b4e9de1fa13a7bea2c93c7a30260ef9afe81ee7d17b5b121009f8 |
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.translation import ungettext
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import commit_on_success_with_read_committed
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
# The course selection work is done in courseware.courses.
domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
if domain is False:
domain = request.META.get('HTTP_HOST')
courses = get_courses(user, domain=domain)
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
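    Example (illustrative URL only):
        process_survey_link("https://example.com/survey?id={UNIQUE_ID}", user)
        # -> "https://example.com/survey?id=<sha1 of user.username>"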
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course_overview, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
Returns:
dict: A dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
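        Illustrative example for a certificate in the 'ready' state (values are
        placeholders, not real data):
            {'status': 'ready', 'show_download_url': True,
             'download_url': 'http://example.com/cert.pdf',
             'show_disabled_download_button': False,
             'show_survey_button': False, 'grade': '0.95'}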
"""
if not course_overview.may_certify():
return {}
return _cert_info(
user,
course_overview,
certificate_status_for_student(user, course_overview.id),
course_mode
)
def reverification_info(statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_to_include (str): for use in Microsites. If not None, ONLY courses
of this org will be returned.
orgs_to_exclude (list[str]): If org_to_include is not None, this
argument is ignored. Else, courses of this org will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# If we are in a Microsite, then filter out anything that is not
# attributed (by ORG) to that Microsite.
if org_to_include and course_overview.location.org != org_to_include:
continue
# Conversely, if we are not in a Microsite, then filter out any enrollments
# with courses attributed (by ORG) to Microsites.
elif course_overview.location.org in orgs_to_exclude:
continue
# Else, include the enrollment.
else:
yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
"""
Implements the logic for cert_info -- split out for testing.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
}
default_status = 'processing'
default_info = {'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing')
if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return None
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None
}
if (status in ('generating', 'ready', 'notpassing', 'restricted') and
course_overview.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
# showing the certificate web view button if certificate is ready state and feature flags are enabled.
if has_html_certificates_enabled(course_overview.id, course_overview):
if course_overview.has_any_active_web_certificate:
certificate_url = get_certificate_url(
user_id=user.id,
course_id=unicode(course_overview.id),
)
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': u'{url}'.format(url=certificate_url)
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course_overview.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
if linkedin_config.enabled:
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course_overview.id,
course_overview.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
We would like to compute some more information from the given course modes
and the user's current enrollment
Returns the given information:
- whether to show the course upsell information
- numbers of days until they can't upsell anymore
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already verified and if verified is an
# option
if 'verified' in modes and enrollment.mode != 'verified':
mode_info['show_upsell'] = True
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Checking either registration is blocked or not ."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
# Registration codes may be generated via the bulk purchase scenario.
# We only need to check invoice-generated registration codes to see
# whether their invoice is still valid.
if redeemed_registration.invoice_item:
if not getattr(redeemed_registration.invoice_item.invoice, 'is_valid'):
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key
)
track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard')
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
# for microsites, we want to filter and only show enrollments for courses within
# the microsite's 'ORG'
course_org_filter = microsite.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
# remove our current Microsite from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if has_access(request.user, 'load', enrollment.course_overview)
and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
)
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
for enrollment in course_enrollments
}
# only show email settings for Mongo courses and when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
CourseAuthorization.instructor_email_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
show_refund_option_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.refundable()
)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
else:
redirect_message = ''
context = {
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'course_enrollments': course_enrollments,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse(logout_user),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
}
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
messages = [
{
"course_id": enrollment.course_overview.id,
"course_name": enrollment.course_overview.display_name,
"allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
}
for enrollment in recently_enrolled_courses
]
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
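# Illustrative note (a hedged sketch, not part of the original module): with
# DashboardConfiguration.recent_enrollment_time_delta set to 600, only active
# enrollments created within the last ten minutes count as "recent" here.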
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
Returns:
True if the course is allowing donations.
"""
donations_enabled = DonationConfiguration.current().enabled
return donations_enabled and enrollment.mode in course_modes[course_id] and course_modes[course_id][enrollment.mode].min_price == 0
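# Illustrative example (assumed data, a sketch, not part of the original
# module): with donations enabled and course modes for the course such as
#     {'honor': <mode with min_price=0>, 'verified': <mode with min_price=50>}
# an 'honor' enrollment is eligible for the donation prompt (returns True),
# while a 'verified' enrollment is not, because its minimum price is non-zero.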
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
A credit course is a course for which a user can purchase
college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
status = {
"course_key": unicode(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": None,
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
# We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
@require_POST
@commit_on_success_with_read_committed
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is True; callers with legacy code or non-standard flows
(ex. beta tester invitations) may pass False, but for any standard
enrollment flow you want this to be True.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
# We don't do this check on unenroll, since otherwise a bad course id could never be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (honor)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "honor".
try:
CourseEnrollment.enroll(user, course_id, check_access=check_access)
except Exception:
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
if not CourseEnrollment.is_enrolled(user, course_id):
return HttpResponseBadRequest(_("You are not enrolled in this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
third_party_uid = running_pipeline['kwargs']['uid']
requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
try:
user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
username=username, backend_name=backend_name))
return HttpResponse(
_("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
platform_name=platform_name, provider_name=requested_provider.name
)
+ "<br/><br/>" +
_("Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard.").format(
platform_name=platform_name, provider_name=requested_provider.name
)
+ "<br/><br/>" +
_("If you don't have an {platform_name} account yet, "
"click <strong>Register</strong> at the top of the page.").format(
platform_name=platform_name),
content_type="text/plain",
status=403
)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
"value": _('There was an error receiving your login information. Please email us.'), # TODO: User error message
}) # TODO: this should be status code 400 # pylint: disable=fixme
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case, logging in user without external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('This account has been temporarily locked due to excessive login failures. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': email,
'username': username,
})
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookies(request, response, user)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and match that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
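# Illustrative call (a hedged sketch, not part of the original module; the
# exact URL pattern for this view lives in urls.py and is assumed here):
#
#     POST <login_oauth_token URL for backend "google-oauth2">
#     access_token=<token issued by the provider>
#
# 204 means a session was established for the matched user; 401 with
# {"error": "invalid_token"} means the token did not resolve to a known user,
# and 400 with {"error": "invalid_request"} means no access_token was sent.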
@ensure_csrf_cookie
def logout_user(request):
"""
HTTP request to log out the user. Redirects to marketing page.
Deletes both the CSRF and sessionid cookies so the marketing
site can determine the logged in state of the user
"""
# We do not log here, because we have a handler registered
# to perform logging on successful logouts.
logout(request)
if settings.FEATURES.get('AUTH_USE_CAS'):
target = reverse('cas-logout')
else:
target = '/'
response = redirect(target)
delete_logged_in_cookies(response)
return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.all()[0].changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Handler that saves the user's signup source
when the user is created.
"""
if 'created' in kwargs and kwargs['created']:
site = microsite.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
if not form.is_valid():
raise ValidationError(form.errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
user.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
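# Illustrative usage (a hedged sketch, not part of the original module):
#
#     form = AccountCreationForm(data=params, tos_required=False)
#     user, profile, registration = _do_create_account(form)
#
# Callers are expected to handle AccountValidationError (duplicate username or
# email) and ValidationError (invalid form data), as create_account_with_params
# and auto_auth do below.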
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
* It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow for microsites to define their own set of required/optional/hidden fields
extra_fields = microsite.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = microsite.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
tos_required = (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(
external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
# Perform operations within a transaction that are critical to account creation
with transaction.commit_on_success():
# first, create the account
(user, profile, registration) = _do_create_account(form)
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception:
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# If the user is registering via 3rd party auth, track which provider they use
third_party_provider = None
running_pipeline = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
# Track the user's registration
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': user.email,
'username': user.username,
})
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
# 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
#
# Note that this feature is only tested as a flag set one way or
# the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
not (
third_party_provider and third_party_provider.skip_email_verification and
user.email == running_pipeline['kwargs'].get('details', {}).get('email')
)
)
if send_email:
context = {
'name': profile.name,
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
else:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
else:
registration.activate()
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
return new_user
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included in navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not login
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
course_id = request.GET.get('course_id', None)
# mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
enrollment_mode = request.GET.get('enrollment_mode', 'honor')
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user object.
try:
user, profile, reg = _do_create_account(form)
except AccountValidationError:
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response
if request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
student = User.objects.filter(id=regs[0].user_id)
if student:
ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request,
domain_override=request.get_host())
# When password change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
""" A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
"""
# cribbed from django.contrib.auth.views.password_reset_confirm
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# tie in password strength enforcement as an optional level of
# security protection
err_msg = None
if request.method == 'POST':
password = request.POST['new_password1']
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
except ValidationError, err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
# also, check to see if passwords are getting reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
if err_msg:
# We have a password reset attempt that violates a security policy; use the
# existing Django template to communicate this back to the user
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err_msg,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
else:
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}
if request.method == 'POST':
# remember what the old password hash is before we call down
old_password_hash = user.password
result = password_reset_confirm(
request, uidb36=uidb36, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change? If so, record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
return result
else:
return password_reset_confirm(
request, uidb36=uidb36, token=token, extra_context=extra_context
)
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
Given a new email for a user, does some basic verification of the new address. If any issues are encountered
during verification, a ValueError will be thrown.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
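# Illustrative usage (a hedged sketch, not part of the original module):
#
#     try:
#         validate_new_email(request.user, new_email)
#     except ValueError as err:
#         # e.g. malformed address, unchanged address, or address already in use
#         return JsonResponse({'success': False, 'error': unicode(err)}, status=400)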
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
# if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
@transaction.commit_manually
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update the
    user's address to the new e-mail.
"""
try:
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.rollback()
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.rollback()
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.rollback()
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.rollback()
return response
response = render_to_response("email_change_successful.html", address_context)
transaction.commit()
return response
except Exception: # pylint: disable=broad-except
# If we get an unexpected exception, be sure to rollback the transaction
transaction.rollback()
raise
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id
)
track.views.server_track(request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard')
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id
)
track.views.server_track(request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard')
return JsonResponse({"success": True})
|
zofuthan/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 91,744
|
[
"VisIt"
] |
6c5442cb02d86748724905643ba94b639319376a766662abcbc9dea256cddcde
|
# encoding: utf-8
'''
Various vertical coordinates
Presently, only ocean s-coordinates are supported. The plan is to eventually
include all of the vertical coordinate systems defined by the CF conventions.
'''
__docformat__ = "restructuredtext en"
import numpy as np
import warnings
class s_coordinate(object):
"""
Song and Haidvogel (1994) vertical coordinate transformation (Vtransform=1) and
stretching functions (Vstretching=1).
return an object that can be indexed to return depths
s = s_coordinate(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = min(self.hmin, self.Tcline)
self.Vtrans = 1
if (self.Tcline > self.hmin):
warnings.warn('Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. \n You need to make sure that Tcline <= hmin when using transformation 1.' %(self.Tcline,self.hmin))
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
ds = 1.0 / self.N
self.s_rho = -self.c1 + (lev - self.p5) * ds
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
ds = 1.0 / (self.Np-1)
self.s_w = -self.c1 + lev * ds
def _get_Cs_r(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_rho) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_rho + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_r = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_w) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_w + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_w = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_w = self.s_w
class s_coordinate_2(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=2).
return an object that can be indexed to return depths
s = s_coordinate_2(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 2
self.Aweight = 1.0
self.Bweight = 1.0
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_2, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_2, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_rho + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_rho + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_rho + self.c1)**self.Bweight))
self.Cs_r = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_r = Csur
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_w + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_w + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_w + self.c1)**self.Bweight))
self.Cs_w = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_w = Csur
else:
self.Cs_w = self.s_w
class s_coordinate_4(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=4).
return an object that can be indexed to return depths
s = s_coordinate_4(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 4
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_4, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_4, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_rho**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
(self.c1 - np.exp(-self.theta_b))
self.Cs_r = Cbot
else:
self.Cs_r = Csur
def _get_Cs_w(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_w**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
( self.c1 - np.exp(-self.theta_b) )
self.Cs_w = Cbot
else:
self.Cs_w = Csur
class s_coordinate_5(s_coordinate):
"""
A. Shchepetkin (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=5).
return an object that can be indexed to return depths
s = s_coordinate_5(h, theta_b, theta_s, Tcline, N)
Brian Powell's surface stretching.
"""
def __init__(self, h, theta_b, theta_s, Tcline, N, hraw=None, zeta=None):
self.hraw = hraw
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.Tcline = Tcline
self.N = int(N)
self.Np = self.N+1
self.hc = self.Tcline
self.Vtrans = 5
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
s = -(lev * lev - 2 * lev * self.N + lev + self.N * self.N - self.N) / \
(self.N * self.N - self.N) - \
0.01 * (lev * lev - lev * self.N) / (self.c1 - self.N)
# (self.c1 * self.N * self.N - self.N) - \
self.s_rho = s
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
s = -(lev * lev - 2 * lev * self.N + lev + self.N * self.N - self.N) / \
(self.N * self.N - self.N) - \
0.01 * (lev * lev - lev * self.N) / (self.c1 - self.N)
# (self.c1 * self.N * self.N - self.N) - \
self.s_w = s
def _get_Cs_r(self):
if self.theta_s > 0:
csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
csur = -(self.s_rho * self.s_rho)
if self.theta_b > 0:
self.Cs_r = (np.exp(self.theta_b * (csur + self.c1)) - self.c1) / \
(np.exp(self.theta_b) - self.c1) - self.c1
else:
self.Cs_r = csur
def _get_Cs_w(self):
if self.theta_s > 0:
csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
csur = -(self.s_w * self.s_w)
if self.theta_b > 0:
self.Cs_w = (np.exp(self.theta_b * (csur + self.c1)) - self.c1) / \
(np.exp(self.theta_b) - self.c1) - self.c1
else:
self.Cs_w = csur
class z_r(object):
"""
return an object that can be indexed to return depths of rho point
z_r = z_r(h, hc, N, s_rho, Cs_r, zeta, Vtrans)
"""
def __init__(self, h, hc, N, s_rho, Cs_r, zeta, Vtrans):
self.h = h
self.hc = hc
self.N = N
self.s_rho = s_rho
self.Cs_r = Cs_r
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_r = np.empty((ti, self.N) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.N):
z0 = self.hc * self.s_rho[k] + (self.h - self.hc) * self.Cs_r[k]
z_r[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4 or self.Vtrans == 5:
for n in range(ti):
for k in range(self.N):
z0 = (self.hc * self.s_rho[k] + self.h * self.Cs_r[k]) / \
(self.hc + self.h)
z_r[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_r[res_index])
class z_w(object):
"""
return an object that can be indexed to return depths of w point
z_w = z_w(h, hc, Np, s_w, Cs_w, zeta, Vtrans)
"""
def __init__(self, h, hc, Np, s_w, Cs_w, zeta, Vtrans):
self.h = h
self.hc = hc
self.Np = Np
self.s_w = s_w
self.Cs_w = Cs_w
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_w = np.empty((ti, self.Np) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.Np):
z0 = self.hc * self.s_w[k] + (self.h - self.hc) * self.Cs_w[k]
z_w[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4:
for n in range(ti):
for k in range(self.Np):
z0 = (self.hc * self.s_w[k] + self.h * self.Cs_w[k]) / \
(self.hc + self.h)
z_w[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_w[res_index])
class z_coordinate(object):
"""
return an object that can be indexed to return depths
z = z_coordinate(h, depth, N)
"""
def __init__(self, h, depth, N):
self.h = np.asarray(h)
self.N = int(N)
ndim = len(h.shape)
# print h.shape, ndim
if ndim == 2:
Mm, Lm = h.shape
self.z = np.zeros((N, Mm, Lm))
elif ndim == 1:
Sm = h.shape[0]
self.z = np.zeros((N, Sm))
for k in range(N):
self.z[k,:] = depth[k]
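# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  The flat 100 m bathymetry and the stretching parameters below are
# made-up values, chosen only to show how the coordinate objects are indexed.
if __name__ == '__main__':
    h = 100.0 * np.ones((10, 12))                  # bathymetry [m], positive down
    grd = s_coordinate(h, theta_b=0.4, theta_s=5.0, Tcline=50.0, N=30)
    z_rho = grd.z_r[:]                             # (N, 10, 12) depths at rho points
    z_wpt = grd.z_w[:]                             # (N+1, 10, 12) depths at w points
    print("rho depths %s, w depths %s" % (str(z_rho.shape), str(z_wpt.shape)))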
|
kshedstrom/pyroms
|
pyroms/pyroms/vgrid.py
|
Python
|
bsd-3-clause
| 14,274
|
[
"Brian"
] |
92e0995cd90a2929a7c80f3588bf66eb134f22945cccf5b5feb411fc90a0f176
|
# -*- coding: utf-8 -*-
# ****************************************************************************
# Original work Copyright (C) 2013-2015 SUNCAT
# Modified work Copyright 2015-2017 Lukasz Mentel
#
# This file is distributed under the terms of the
# GNU General Public License. See the file 'COPYING'
# in the root directory of the present distribution,
# or http://www.gnu.org/copyleft/gpl.txt .
# ****************************************************************************
from __future__ import unicode_literals
from collections import namedtuple
import numpy as np
from ase import constraints
__version__ = "0.3.4"
speciestuple = namedtuple(
"speciestuple", ["symbol", "mass", "magmom", "U", "J", "U_alpha"]
)
def num2str(x):
"""
Add 'd00' to floating point number to avoid random trailing digits in
Fortran input routines
"""
if "e" in str(x):
return str(x)
else:
return str(x) + "d00"
def bool2str(x):
"Convert python to fortran logical"
if x:
return ".true."
else:
return ".false."
def convert_constraints(atoms):
"""
    Convert some of ase's constraints to pw.x constraints for pw.x internal
    relaxation. Returns constraints which are simply expressed as setting
    force components as the first list, and other constraints that are
    implemented in espresso as the second list.
"""
if atoms.constraints:
n = len(atoms)
if n == 0:
return [], []
forcefilter = []
otherconstr = []
for c in atoms.constraints:
if isinstance(c, constraints.FixAtoms):
if len(forcefilter) == 0:
forcefilter = np.ones((n, 3), np.int)
forcefilter[c.index] = [0, 0, 0]
elif isinstance(c, constraints.FixCartesian):
if len(forcefilter) == 0:
forcefilter = np.ones((n, 3), np.int)
forcefilter[c.a] = c.mask
elif isinstance(c, constraints.FixBondLengths):
for d in c.constraints:
otherconstr.append(
"'distance' %d %d" % (d.indices[0] + 1, d.indices[1] + 1)
)
elif isinstance(c, constraints.FixBondLength):
otherconstr.append(
"'distance' %d %d" % (c.indices[0] + 1, c.indices[1] + 1)
)
elif isinstance(c, constraints.FixInternals):
# we ignore the epsilon in FixInternals because there can only be one global
# epsilon be defined in espresso for all constraints
for d in c.constraints:
if isinstance(d, constraints.FixInternals.FixBondLengthAlt):
otherconstr.append(
"'distance' %d %d %s"
% (d.indices[0] + 1, d.indices[1] + 1, num2str(d.bond))
)
elif isinstance(d, constraints.FixInternals.FixAngle):
otherconstr.append(
"'planar_angle' %d %d %d %s"
% (
d.indices[0] + 1,
d.indices[1] + 1,
d.indices[2] + 1,
num2str(np.arccos(d.angle) * 180.0 / np.pi),
)
)
elif isinstance(d, constraints.FixInternals.FixDihedral):
otherconstr.append(
"'torsional_angle' %d %d %d %d %s"
% (
d.indices[0] + 1,
d.indices[1] + 1,
d.indices[2] + 1,
d.indices[3] + 1,
num2str(np.arccos(d.angle) * 180.0 / np.pi),
)
)
else:
raise NotImplementedError(
"constraint {} from FixInternals not implemented\n"
"consider ase-based relaxation with this constraint instead".format(
d.__name__
)
)
else:
raise NotImplementedError(
"constraint {} not implemented\n"
"consider ase-based relaxation with this constraint instead".format(
c.__name__
)
)
return forcefilter, otherconstr
else:
return [], []
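# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It assumes an ASE version providing ase.build.molecule; the
# molecule and the constraint are arbitrary examples.
if __name__ == '__main__':
    from ase.build import molecule
    water = molecule('H2O')
    water.set_constraint(constraints.FixAtoms(indices=[0]))  # pin the oxygen
    forcefilter, otherconstr = convert_constraints(water)
    print(num2str(0.5))    # '0.5d00' -- float formatted for pw.x input
    print(bool2str(True))  # '.true.' -- python bool as a fortran logical
    print(forcefilter)     # row 0 is [0 0 0]; unconstrained rows stay [1 1 1]
    print(otherconstr)     # [] -- FixAtoms is handled purely via force filtering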
|
lmmentel/ase-espresso
|
espresso/utils.py
|
Python
|
gpl-3.0
| 4,693
|
[
"ASE",
"ESPResSo"
] |
7835e11c799ecc428540ad689614a7340864a466deb0110217e5347b0ab6993b
|
import pyspeckit
# Read in J000002.09+155254.1 spectrum, a nice emission-line galaxy
sp = pyspeckit.Spectrum('SIIdoublet.fits')
# Read in rest wavelengths of SII lines. If you didn't know the names already,
# you could do sp.speclines.optical.lines.keys() to see what is available.
SIIa = sp.speclines.optical.lines['SIIa'][0]
SIIb = sp.speclines.optical.lines['SIIb'][0]
# Wavelength difference between doublet lines - use to tie positions together
offset = SIIb - SIIa
# Let's have a look at the spectrum
sp.plotter()
# raw_input('Let\'s do a simple continuum subtraction (continue)')
# Plot the baseline fit
sp.baseline(subtract = False)
# raw_input('Let\'s zoom in on the SII doublet (continue)')
# Subtract the baseline fit and save
sp.baseline(subtract = True)
sp.plotter.savefig('doublet_example_fullspectrum.png')
# Guess for the redshift - otherwise we'll end up with the Halpha-NII complex
z = 0.02
# Zoom in on SII doublet
sp.plotter(xmin = SIIa * (1 + z) - 75, xmax = SIIb * (1 + z) + 75, ymin = -10, ymax = 60)
# Guess amplitudes to be 100, positions to be rest wavelengths
# times factor (1 + z), and widths to be 5 Angstroms
guesses = [100, SIIa * (1 + z), 5, 100, SIIb * (1 + z), 5]
tied = ['', '', '', '', 'p[1] + %g' % offset, '']
# Do the fit, and plot it
sp.specfit(guesses = guesses, tied = tied, quiet = False)
sp.plotter.savefig('doublet_example_SII.png')
# raw_input('Hooray! The doublet has been fit. ')
SIIb_obs = sp.specfit.modelpars[-2]
print 'Our guess for the redshift was z = 0.02.'
print 'The redshift, as derived by the line shift, is z = %g' % ((SIIb_obs / SIIb) - 1)
|
bsipocz/pyspeckit
|
examples/doublet_example.py
|
Python
|
mit
| 1,642
|
[
"Galaxy"
] |
6339f27bb0206c16c12ed2433f761176c44f6fe65f05aa33a8eff651ee51bc03
|
import argparse
import em.tools.input_output as IO
def register_parser(subparsers):
parser = subparsers.add_parser('align', usage=usage(), description=description())
add_arguments(parser)
def add_arguments(parser):
parser.add_argument("--input", metavar="FILE", help="Input File or Reference Structure.", required=True)
parser.add_argument("--refatoms", type=str, help="String with atoms for alignment in reference structure.", required=True)
parser.add_argument("--fit", metavar="FILE", type=str, help="Path to file for the fit structure.", required=True)
parser.add_argument("--fitatoms", type=str, help="String with atoms for alignment in fit structure.", required=True)
parser.add_argument("--out", metavar="FILE", type=str, help="Path to output file of reference structure aligned over the fit struscture",required=True)
parser.add_argument("--addatoms", default="", type=str, help="""Optional, default no atoms. Cuts and pastes all atoms corresponding
to amino acids defined by a string such as A,202,217:A,202,217. The region
    before the colon corresponds to the region to be cut from the reference
    structure, and the region defined after the colon is to be pasted to
    the fit structure. Additions renumber all amino acids after that.
Unlike the string that defines reference and fit atoms, atom type is
not needed, and all atoms in the amino acids are added.""")
parser.set_defaults(func=run)
def run(options):
IO.align_pdbs(options.input, options.fit, options.refatoms, options.fitatoms, options.out, options.addatoms)
def description():
    return '''This command aligns regions according to specific atoms and contiguous amino-acid sections of two
    crystal structures. The fit structure is moved over the input/reference structure according to
    reference and fit atom selections respectively. If the addatoms option is included, it adds
    atoms from the reference to the fit structure's specified region. The format to specify reference
    and fit atoms is ATOMTYPE,CHAIN,FROMAMINOACID,TOAMINOACID Ex: CA,B,2,6. Multiple regions for alignment can be
    defined by separating them with a colon. Ex: CA,B,2,6:CA,B,10,26
'''
def usage():
    return ('\npdb_cif.py align --input 1GIA.cif --refatoms CA,B,199,201:CA,B,218,220 --fit 1GDD.pdb '
            '--fitatoms CA,B,199,201:CA,B,218,220 --out 1GDD_aligned_completed_with_1GIA.pdb --addatoms A,202,218:A,202,218')
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description=description())
add_arguments(arg_parser)
args = arg_parser.parse_args()
args.func(args)
|
noelcjr/EntropyMaxima
|
em/subscripts/align.py
|
Python
|
gpl-3.0
| 3,061
|
[
"CRYSTAL"
] |
c32908c2de8f13849f1e973fc1c1d6bbc31aa23613f472d0568a58ef35cbb418
|
try:
from safe_pylab import *
from matplotlib.collections import *
import plot_wkb
except:
pass
from numpy import *
import sys,time
# qgis clashes with the Rtree library (because it includes its own local copy).
# fall back to a wrapper around the qgis spatial index if it looks like we're running
# under qgis.
from safe_rtree import Rtree
try:
from shapely import geometry
import shapely.predicates
except ImportError:
print "Shapely is not available!"
geometry = "unavailable"
import priority_queue as pq
import code
import os,types
from collections import Iterable
try:
try:
from osgeo import ogr,osr
except ImportError:
import ogr,osr
except ImportError:
print "GDAL failed to load"
ogr = "unavailable"
osr = ogr
from array_append import array_append
# edge markers:
CUT_EDGE = 37 # the marker for a cut edge
OPEN_EDGE = 3
LAND_EDGE = 1
DELETED_EDGE = -1
# edge-cell markers ( the cell ids that reside in the edge array
BOUNDARY = -1 # cell marker for edge of domain
UNMESHED = -2 # cell marker for edges not yet meshed
xxyy = array([0,0,1,1])
xyxy = array([0,1,0,1])
def dist(a,b):
return sqrt(sum((a-b)**2,axis=-1))
# rotate the given vectors/points through the CCW angle in radians
def rot(angle,pnts):
R = array( [[cos(angle),-sin(angle)],
[sin(angle),cos(angle)]] )
return tensordot(R,pnts,axes=(1,-1) ).transpose() # may have to tweak this for multiple points
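# Example (illustrative): rotating the unit x vector by 90 degrees CCW gives the
# unit y vector, i.e. rot(pi/2, array([1.0, 0.0])) ~ [0., 1.]; the same call
# also broadcasts over an [N,2] array of points, rotating each row.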
def signed_area(points):
i = arange(points.shape[0])
ip1 = (i+1)%(points.shape[0])
return 0.5*(points[i,0]*points[ip1,1] - points[ip1,0]*points[i,1]).sum()
def is_ccw(points):
return signed_area(points) > 0
def ensure_ccw(points):
if not is_ccw(points):
# print "Hey - you gave me CW points. I will reverse"
points = points[::-1]
return points
def ensure_cw(points):
if is_ccw(points):
# print "Hey - you gave me CCW points. I will reverse"
points = points[::-1]
return points
def outermost_rings( poly_list ):
""" given a list of Polygons, return indices for those that are not inside
any other polygon
"""
areas = array( [p.area for p in poly_list])
order = argsort(-1 * areas) # large to small
outer = []
for i in range(len(order)):
ri = order[i]
# print "Checking to see if poly %d is an outer polygon"%ri
is_exterior = 1
# check polygon ri (the ith largest) against all polygons
# larger than it.
for j in range(i):
rj = order[j]
if poly_list[rj].contains( poly_list[ri] ):
# print "%d contains %d"%(rj,ri)
is_exterior = 0 # ri is contained by rj, so not exterior
break
if is_exterior:
# print "%d is exterior"%ri
outer.append(ri)
return outer
def circumcenter(p1,p2,p3):
ref = p1
p1x = p1[...,0] - ref[...,0] # ==0.0
p1y = p1[...,1] - ref[...,1] # ==0.0
p2x = p2[...,0] - ref[...,0]
p2y = p2[...,1] - ref[...,1]
p3x = p3[...,0] - ref[...,0]
p3y = p3[...,1] - ref[...,1]
vc = zeros( p1.shape, float64)
# taken from TRANSFORMER_gang.f90
dd=2.0*((p1x-p2x)*(p1y-p3y) -(p1x-p3x)*(p1y-p2y))
b1=p1x**2+p1y**2-p2x**2-p2y**2
b2=p1x**2+p1y**2-p3x**2-p3y**2
vc[...,0]=(b1*(p1y-p3y)-b2*(p1y-p2y))/dd + ref[...,0]
vc[...,1]=(b2*(p1x-p2x)-b1*(p1x-p3x))/dd + ref[...,1]
return vc
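# Example (illustrative): for the right triangle with vertices (0,0), (1,0) and
# (0,1) the circumcenter is the midpoint of the hypotenuse, so
#   circumcenter(array([0.,0.]), array([1.,0.]), array([0.,1.]))
# returns ~[0.5, 0.5], equidistant from all three vertices.  The arguments may
# also be [...,2] arrays holding one triangle per row.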
class TriGridError(Exception):
pass
class NoSuchEdgeError(TriGridError):
pass
class NoSuchCellError(TriGridError):
pass
# cache the results of reading points.dat files for suntans grid files
# maps filenames to point arrays - you should probably
# just copy the array, though, since there is the possibility
# of altering the points array
points_dat_cache = {}
class TriGrid(object):
index = None
edge_index = None
_vcenters = None
verbose = 0
default_clip = None
def __init__(self,sms_fname=None,
tri_basename=None,
suntans_path=None,processor=None,
suntans_reader=None,
tec_file=None,
gmsh_basename=None,
edges=None,points=None,cells=None,
readonly=False):
self.sunreader = None
self._pnt2cells = None
self.readonly = readonly
self.init_listeners()
if sms_fname:
self.read_sms(sms_fname)
elif tri_basename:
self.read_triangle(tri_basename)
elif gmsh_basename:
self.read_gmsh(gmsh_basename)
elif suntans_path:
self.processor = processor
self.suntans_path = suntans_path
self.read_suntans()
elif suntans_reader:
self.processor = processor
self.sunreader = suntans_reader
self.read_suntans()
elif tec_file:
self.read_tecplot(tec_file)
elif points is not None:
self.from_data(points,edges,cells)
else:
# This will create zero-length arrays for everyone.
self.from_data(None,None,None)
def file_path(self,conf_name):
if self.sunreader:
return self.sunreader.file_path(conf_name,self.processor)
else:
if conf_name == 'points':
basename = 'points.dat'
elif conf_name == 'edges':
basename = 'edges.dat'
elif conf_name == 'cells':
basename = 'cells.dat'
else:
raise Exception,"Unknown grid conf. name: "+conf_name
if self.processor is not None and conf_name != 'points':
basename = basename + ".%i"%self.processor
return self.suntans_path + '/' + basename
def from_data(self,points,edges,cells):
if points is None:
self.points = zeros( (0,2), float64 )
else:
self.points = points[:,:2] # discard any z's that come in
if cells is None:
self.cells = zeros( (0,3), int32)
else:
self.cells = cells
if edges is None:
self.edges = zeros((0,5),int32)
else:
ne = len(edges)
# incoming edges may just have connectivity
if edges.shape[1] == 2:
self.edges = zeros( (ne,5), int32)
self.edges[:,:2] = edges
# defaults:
self.edges[:,2] = LAND_EDGE
self.edges[:,3] = UNMESHED
self.edges[:,4] = BOUNDARY
# update based on cell information:
self.set_edge_neighbors_from_cells()
# And make a better guess at edge marks
internal = (self.edges[:,3]>=0) & (self.edges[:,4]>=0)
self.edges[internal,2] = 0
elif edges.shape[1] == 5:
self.edges = edges
else:
raise Exception,"Edges should have 2 or 5 entries per edge"
def set_edge_neighbors_from_cells(self):
"""
"""
iip = array([[0,1],[1,2],[2,0]])
for c in xrange(self.Ncells()):
for pair in iip:
nodes = self.cells[c,pair]
j = self.find_edge(nodes)
if nodes[0] == self.edges[j,0]:
self.edges[j,3] = c
else:
self.edges[j,4] = c
def refresh_metadata(self):
""" Call this when the cells, edges and nodes may be out of sync with indices
and the like.
"""
self.index = None
self.edge_index = None
self._vcenters = None
def read_suntans(self,use_cache=1):
self.read_from = "Suntans"
# read the points:
points_fn = os.path.abspath( self.file_path("points") )
if use_cache and points_dat_cache.has_key(points_fn):
self.points = points_dat_cache[points_fn]
if not self.readonly:
self.points = self.points.copy()
else:
points_fp = open(points_fn)
pnts = []
for line in points_fp:
coords = map(float,line.split())
if len(coords) >= 2:
pnts.append(coords[:2])
self.points = array(pnts)
if use_cache:
if self.readonly:
points_dat_cache[points_fn] = self.points
else:
points_dat_cache[points_fn] = self.points.copy()
# read the cells:
cell_fname = self.file_path("cells")
cells_fp = open(cell_fname)
vcenters = []
cells = []
for line in cells_fp:
line = line.split()
if len(line)==8:
# first two are voronoi center coordinates
vcenters.append( map(float,line[:2]) )
# then three point indices:
cells.append( map(int,line[2:5]) )
self._vcenters = array(vcenters)
self.cells = array(cells)
self.cell_mask = ones( len(self.cells) )
# Edges!
# Each line is endpoint_i endpoint_i edge_marker cell_i cell_i
edge_fname = self.file_path('edges')
# print "Reading edges from %s"%edge_fname
# edges are stored just as in the data file:
# point_i, point_i, marker, cell_i, cell_i
edges_fp = open(edge_fname,"rt")
edges = []
for line in edges_fp:
line = line.split()
if len(line) == 5:
edges.append(map(int,line))
self.edges = array(edges)
def read_gmsh(self,gmsh_basename):
""" reads output from gmsh - gmsh_basename.{nod,ele}
"""
self.fname = gmsh_basename
self.read_from = "GMSH"
self._vcenters = None # will be lazily created
points = loadtxt( self.fname +".nod")
id_offset = int(points[0,0]) # probably one-based
self.points = points[:,1:3]
print "Reading cells"
elements = loadtxt( self.fname +".ele")
self.cells = elements[:,1:4].astype(int32) - id_offset
self.cell_mask = ones( len(self.cells) )
self.make_edges_from_cells()
print "Done"
def read_triangle(self,tri_basename):
""" reads output from triangle, tri_basename.{ele,poly,node}
"""
self.fname = tri_basename
self.read_from = "Triangle"
self._vcenters = None # will be lazily created
points_fp = open(tri_basename + ".node")
Npoints,point_dimension,npoint_attrs,npoint_markers = map(int,points_fp.readline().split())
self.points = zeros((Npoints,2),float64)
id_offset = 0
for i in range(self.Npoints()):
line = points_fp.readline().split()
# pnt_id may be 0-based or 1-based.
pnt_id = int(line[0])
if i == 0:
id_offset = pnt_id
print "Index offset is ",id_offset
# let z component stay 0
self.points[i,:2] = map(float,line[1:3])
points_fp.close()
print "Reading cells"
elements_fp = open(tri_basename + ".ele")
Ncells,node_per_tri,ncell_attrs = map(int,elements_fp.readline().split())
if node_per_tri != 3:
raise Exception,"Please - just use 3-point triangles!"
self.cells = zeros((Ncells,3),int32)
self.cell_mask = ones( len(self.cells) )
for i in range(self.Ncells()):
parsed = map(int,elements_fp.readline().split())
cell_id = parsed[0]
self.cells[i] = array(parsed[1:]) - id_offset
edges_fn = tri_basename + ".edge"
if os.path.exists(edges_fn):
            edges_fp = open(edges_fn)
Nedges,nedge_markers = map(int,edges_fp.readline().split())
self.edges = zeros((Nedges,5),int32)
# each edge is stored as: (pnt_a, pnt_b, default_marker,node_1,node_2)
for i in range(Nedges):
idx,pnta,pntb,marker = map(int,edges_fp.readline().split())
# and a bit of work to figure out which cells border this edge:
pnta -= id_offset
pntb -= id_offset
cells_a = self.pnt2cells(pnta)
cells_b = self.pnt2cells(pntb)
adj_cells = list(cells_a.intersection(cells_b))
neighbor1 = adj_cells[0]
if len(adj_cells) == 1:
neighbor2 = -1
else:
neighbor2 = adj_cells[1]
self.edges[i] = [pnta,pntb,marker,neighbor1,neighbor2]
else:
print "No edges - will recreate from cells"
self.make_edges_from_cells()
print "Done"
def read_tecplot(self,fname):
self.read_from = 'tecplot'
self.fname = fname
self._vcenters = None # lazy creation
fp = open(fname)
while 1:
line = fp.readline()
if line.find('ZONE') == 0:
break
import re
m = re.search(r'\s+N=\s*(\d+)\s+E=\s*(\d+)\s',line)
if not m:
print "Failed to parse: "
print line
raise Exception,"Tecplot parsing error"
# first non-blank line has number of cells and edges:
Ncells = int( m.group(2) )
Npoints = int( m.group(1) )
self.points = zeros((Npoints,2),float64)
for i in range(Npoints):
self.points[i,:] = map(float,fp.readline().split())
print "Reading cells"
self.cells = zeros((Ncells,3),int32) # store zero-based indices
self.cell_mask = ones( len(self.cells) )
# we might be reading in the output from ortho, in which
# it reports the number of unique cells, not the real number
# cells
i=0
cell_hash = {}
for line in fp:
pnt_ids = array( map(int,line.split()) )
my_key = tuple(sort(pnt_ids))
if not cell_hash.has_key(my_key):
cell_hash[my_key] = i
# store them as zero-based
self.cells[i] = pnt_ids - 1
i += 1
if i != Ncells:
print "Reading %i cells, but expected to get %i"%(i,self.Ncells)
self.cells = self.cells[:i,:]
# At this point we have enough info to create the edges
self.make_edges_from_cells()
# these are used in some gui code
_cell_centers = None
def cell_centers(self):
if self._cell_centers is None:
self._cell_centers = self.points[self.cells].mean(axis=1)
return self._cell_centers
_edge_centers = None
def edge_centers(self):
if self._edge_centers is None:
self._edge_centers = self.points[self.edges[:,:2]].mean(axis=1)
return self._edge_centers
def ghost_cells(self):
""" Return a bool array, with ghost cells marked True
Ghost cells are determined as any cell with an edge that has marker 6
"""
ghost_edge = self.edges[:,2] == 6
ghost_cells = self.edges[ghost_edge,3:5].ravel()
bitmap = zeros( self.Ncells(), bool8 )
bitmap[ ghost_cells ] = True
return bitmap
def delete_unused_nodes(self):
""" any nodes which aren't in any cells or edges will be removed.
"""
all_nodes = arange(self.Npoints())
cell_nodes = unique(ravel(self.cells))
edge_nodes = unique(ravel(self.edges[:,:2]))
deleted_nodes = nonzero(isnan(self.points[:,0]))[0]
okay_nodes = unique( concatenate( (cell_nodes,edge_nodes,deleted_nodes) ) )
unused = setdiff1d(all_nodes,okay_nodes)
for n in unused:
self.delete_node(n)
def renumber(self):
"""
removes duplicate cells and nodes that are not
referenced by any cell, as well as cells that have been deleted (==-1)
"""
cell_hash = {} # sorted tuples of vertices
new_cells = [] # list of indexes into the old ones
for i in range(self.Ncells()):
my_key = tuple( sort(self.cells[i]) )
if not cell_hash.has_key(my_key) and self.cells[i,0] >= 0:
# we're original and not deleted
cell_hash[my_key] = i # value is ignored...
new_cells.append( i )
self.cells = self.cells[new_cells]
# remove lonesome nodes
active_nodes = unique(ravel(self.cells))
        if any(active_nodes < 0):
raise Exception,"renumber: Active nodes includes some negative indices"
old_indices = -ones(self.Npoints(),int32)
self.points = self.points[active_nodes]
if any(isnan(self.points)):
raise Exception,"renumber: some points have NaNs!"
# need a mapping from active node to its index -
# explicitly ask for int32 for consistency
new_indices = arange(active_nodes.shape[0],dtype=int32)
old_indices[active_nodes] = new_indices
# map onto the new indices
self.cells = old_indices[self.cells]
        if any(self.cells < 0):
raise Exception,"renumber: after remapping indices, have negative node index in cells"
# clear out stale data
self._pnt2cells = None
self.index = None
self.edge_index = None
self._pnt2edges = None
self._vcenters = None
# rebuild the edges
self.make_edges_from_cells()
# return the mappings so that subclasses can catch up
return {'valid_cells':new_cells,'pointmap':old_indices,
'valid_nodes':active_nodes}
def write_Triangle(self,basename,boundary_nodes=None):
""" duplicate some of the output of the Triangle program -
particularly the .node and .ele files
note that node and cell indices are taken as 1-based.
if boundary_nodes is supplied, it should be an integer valued array of length Npoints,
and give the boundary marker for each node (usually 0 for internal, nonzero for boundary).
this can be used to specify a subset of the boundary nodes for a BC in SWAN.
if not specified, boundary markers will be 0 for internal, 1 for external nodes.
"""
node_fp = open(basename + ".node",'wt')
node_fp.write("%d 2 0 1\n"%(self.Npoints()))
for n in range(self.Npoints()):
if boundary_nodes is not None:
bmark = boundary_nodes[n]
else:
# id x y boundary marker
bmark = 0
if self.boundary_angle(n) != 0:
bmark = 1
node_fp.write("%d %f %f %d\n"%(n+1,self.points[n,0],self.points[n,1], bmark ) )
node_fp.close()
ele_fp = open(basename + ".ele",'wt')
ele_fp.write("%d 3 0\n"%(self.Ncells()))
for i in range(self.Ncells()):
ele_fp.write("%d %d %d %d\n"%(i+1,self.cells[i,0]+1,self.cells[i,1]+1,self.cells[i,2]+1))
ele_fp.close()
def write_obj(self,fname):
""" Output to alias wavefront
- scales points to fall within [0,10]
"""
fp = open(fname,'wt')
pmax = self.points.max(axis=0)
pmin = self.points.min(axis=0)
rng = (pmax-pmin).max()
scaled_points = (self.points - pmin)*(10/rng)
for i in range(self.Npoints()):
fp.write("v %f %f 0.0\n"%(scaled_points[i,0],scaled_points[i,1]))
for i in range(self.Ncells()):
fp.write("f %d %d %d\n"%(self.cells[i,0]+1,
self.cells[i,1]+1,
self.cells[i,2]+1))
fp.close()
def write_tulip(self,fname):
""" Write a basic representation of the grid to a tulip
compatible file
"""
fp = file(fname,'wt')
fp.write("(tlp \"2.0\"\n")
fp.write("(nodes ")
for i in range(self.Npoints()):
if not isnan(self.points[i,0]):
fp.write(" %i"%i )
fp.write(")\n")
for e in range(self.Nedges()):
if self.edges[e,0] >= 0:
fp.write("(edge %i %i %i)\n"%(e,self.edges[e,0],self.edges[e,1]))
# and the locations of the nodes
fp.write("(property 0 layout \"viewLayout\" \n")
for i in range(self.Npoints()):
if not isnan(self.points[i,0]):
fp.write(" (node %i \"(%f,%f,0)\")\n"%(i,self.points[i,0],self.points[i,1]))
fp.write(")\n")
fp.write(")\n")
fp.close()
def write_sms(self,fname):
fp = open(fname,'wt')
fp.write("\n") # seems to start with blank line.
fp.write("%i %i\n"%(self.Ncells(),self.Npoints()))
# each point has three numbers, though the third is apparently
# always 0
for i in range(self.Npoints()):
fp.write("%10i %.11f %.11f %.11f\n"%(i+1,
self.points[i,0],
self.points[i,1],
0.0 ))
# everything is a triangle
# compute area, positive means CCW
# - turns out SMS wants the order to be consistent, but it always *creates* CCW
# triangles. so best to create CCW triangles
bad = self.areas() < 0
n_bad = sum(bad)
if n_bad > 0:
print "Found %i CW triangles that will be reversed"%n_bad
self.cells[bad,: ] = self.cells[bad,::-1]
for i in range(self.Ncells()):
fp.write("%i 3 %i %i %i\n"%(i+1,
self.cells[i,0]+1,
self.cells[i,1]+1,
self.cells[i,2]+1) )
# And then go back and switch the marker for some of the edges:
print "SMS output: omitting boundary information"
fp.write("0 = Number of open boundaries\n")
fp.write("0 = Total number of open boundary nodes\n")
fp.write("0 = Number of land boundaries\n")
fp.write("0 = Total number of land boundary nodes\n")
fp.close()
def areas(self):
""" returns signed area, CCW is positive"""
i = array([0,1,2])
ip = array([1,2,0])
xi = self.points[self.cells[:,i],0]
yi = self.points[self.cells[:,i],1]
xip = self.points[self.cells[:,ip],0]
yip = self.points[self.cells[:,ip],1]
A = 0.5 * (xi*yip-xip*yi).sum(axis=1)
return A
def angles(self):
""" returns [Nc,3] array of internal angles, in radians
"""
triples=np.array( [[0,1,2],[1,2,0],[2,0,1] ] )
        all_triples=self.points[self.cells[:,triples]]
delta=np.diff(all_triples,axis=2)
abs_angles=np.arctan2(delta[...,1],delta[...,0])
rel_angles=(abs_angles[...,1] - abs_angles[...,0])
int_angles= np.pi - (rel_angles%(2*np.pi))
return int_angles
def read_sms(self,fname):
self.fname = fname
self.read_from = "SMS"
self._vcenters = None # will be lazily created
fp = open(fname)
# skip leading blank lines
while 1:
line = fp.readline().strip()
if line != "":
break
# first non-blank line has number of cells and edges:
Ncells,Npoints = map(int,line.split())
# each point has three numbers, though the third is apparently
# always 0
self.points = zeros((Npoints,2),float64)
for i in range(Npoints):
line = fp.readline().split()
# pnt_id is 1-based
pnt_id = int(line[0])
self.points[pnt_id-1] = map(float,line[1:3])
print "Reading cells"
self.cells = zeros((Ncells,3),int32) # store zero-based indices, and assume
self.cell_mask = ones( len(self.cells) )
# everything is a triangle
for i in range(Ncells):
parsed = map(int,fp.readline().split())
cell_id = parsed[0]
nvertices = parsed[1]
pnt_ids = array(parsed[2:])
if nvertices != 3:
raise "Assumption of all triangles is not true!"
# store them as zero-based
self.cells[cell_id-1] = pnt_ids - 1
# At this point we have enough info to create the edges
self.make_edges_from_cells()
# And then go back and switch the marker for some of the edges:
print "Reading boundaries"
def read_first_int():
return int(fp.readline().split()[0])
for btype in ['open','land']:
if btype == 'open':
marker = 3 # open - not sure if this is 2 or 3...
else:
marker = 1 # closed
n_boundaries = read_first_int()
print "Number of %s boundaries: %d"%(btype,n_boundaries)
tot_boundary_nodes = read_first_int() # who cares...
for boundary_i in range(n_boundaries):
print "Reading %s boundary %d"%(btype,boundary_i+1)
n_nodes_this_boundary = read_first_int()
for i in range(n_nodes_this_boundary):
node_i = read_first_int() - 1 # zero-based
if i>0:
# update the marker in edges
if node_i < last_node_i:
pa,pb = node_i,last_node_i
else:
pa,pb = last_node_i,node_i
try:
edge_i = self.find_edge((pa,pb))
self.edges[edge_i,2] = marker
except NoSuchEdgeError:
print "Couldn't find edge",(pa,pb)
print self.points[ [pa,pb] ]
raise
last_node_i = node_i
print "Done"
def pnt2cells(self,pnt_i):
if self._pnt2cells is None:
# build hash table for point->cell lookup
self._pnt2cells = {}
for i in range(self.Ncells()):
for j in range(3):
if not self._pnt2cells.has_key(self.cells[i,j]):
self._pnt2cells[self.cells[i,j]] = set()
self._pnt2cells[self.cells[i,j]].add(i)
return self._pnt2cells[pnt_i]
def Nedges(self):
return len(self.edges)
def Ncells(self):
return len(self.cells)
def Npoints(self):
return len(self.points)
_pnt2edges = None
def pnt2edges(self,pnt_i):
if self._pnt2edges is None:
# print "building pnt2edges"
p2e = {}
for e in range(self.Nedges()):
# skip deleted edges
if self.edges[e,2] == DELETED_EDGE:
continue
for p in self.edges[e,:2]:
if not p2e.has_key(p):
p2e[p] = []
p2e[p].append(e)
self._pnt2edges = p2e
if self._pnt2edges.has_key(pnt_i):
return self._pnt2edges[pnt_i]
else:
return []
def boundary_angle(self,pnt_i):
""" returns the interior angle in radians, formed by the
boundary at the given point
"""
edges = self.pnt2edges(pnt_i)
# find the absolute angle of each edge, as an angle CCW from
# east
angle_right=None # the angle of the edge with the domain on the right
angle_left =None # angle of the edge with the domain on the left
for edge in edges:
# only care about the edges on the boundary:
if self.edges[edge,4] != BOUNDARY:
continue
segment = self.edges[edge,:2]
seg_reversed = 0
if segment[0] != pnt_i:
segment = segment[::-1]
seg_reversed = 1
# sanity check
if segment[0] != pnt_i:
raise "Well, where is %d in %s"%(pnt_i,segment)
delta = self.points[segment[1]] - self.points[segment[0]]
angle = arctan2(delta[1],delta[0])
# print "Edge %i to %i has angle %g degrees"%(edge,segment[1],180*angle/pi)
# on which side of this edge is the domain?
my_cell = self.edges[edge,3]
if my_cell == UNMESHED:
# the paver enforces that cell markers are 3=>left,4=>right
# so with the stored order of the edge, the pretend cell center
# is always to the left
if not seg_reversed:
xprod = -1
else:
xprod = 1
else:
my_cell_middle = mean( self.points[ self.cells[my_cell] ] , axis=0 )
delta_middle = my_cell_middle - self.points[pnt_i]
# and cross-product:
xprod = cross(delta_middle,delta)
# print "Cross-product is: ",xprod
if xprod > 0:
# the cell center lies to the right of this edge,
# print "Edge to %i has domain to the right"%segment[1]
angle_right = angle
else:
# print "Edge to %i has domain to the left"%segment[1]
angle_left = angle
if angle_left is None and angle_right is None:
# it's an interior node, so no boundary angle...
return 0.0
if angle_left is None:
print "Angle from point %i with domain to left is None!"%pnt_i
if angle_right is None:
print "Angle from point %i with domain to right is None!"%pnt_i
boundary_angle = (angle_right - angle_left) % (2*pi)
return boundary_angle
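    # Illustrative note (not in the original source): the returned value is the
    # interior angle of the domain at the node, in radians -- roughly pi on a
    # straight stretch of boundary, roughly pi/2 at a square convex corner, and
    # exactly 0.0 for interior nodes.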
def plot_bad_bcs(self):
bad_bcs = ((self.edges[:,2] == 0) != (self.edges[:,4] >= 0))
self.plot(edge_mask = bad_bcs)
def plot_nodes(self,ids=None):
if ids is None:
ids = arange(self.Npoints())
if self.default_clip is not None:
c = self.default_clip
valid = (self.points[:,0] > c[0]) & (self.points[:,0]<c[1]) & \
(self.points[:,1] > c[2]) & (self.points[:,1]<c[3])
ids= ids[valid]
[annotate(str(i),self.points[i]) for i in ids if not isnan(self.points[i,0])]
def plot_edge_marks(self,edge_mask=None,clip=None):
""" label edges with c[nc1]-j[j],mark-c[nc2],
rotated so it reads in the correct orientation for nc1, nc2
edge_mask should be a boolean array of size Nedges()
clip can be a list like matplotlib axis() - [xmin,xmax,ymin,ymax]
"""
if clip is None:
clip = self.default_clip
        ec = self.edge_centers()
        if edge_mask is None and clip:
            edge_mask = (self.edges[:,0] >= 0) & ((ec[:,0] >= clip[0]) & (ec[:,0]<=clip[1]) \
                                                  & (ec[:,1] >= clip[2]) & (ec[:,1]<=clip[3]) )
        elif edge_mask is None:
            edge_mask = self.edges[:,0] >= 0
for e in nonzero(edge_mask)[0]:
delta = self.points[ self.edges[e,1]] - self.points[self.edges[e,0]]
angle = arctan2(delta[1],delta[0])
annotate("c%d-j%d,%d-c%d"%(self.edges[e,3],e,self.edges[e,2],self.edges[e,4]),
ec[e],rotation=angle*180/pi - 90,ha='center',va='center')
def plot(self,voronoi=False,line_collection_args={},
all_cells=True,edge_values=None,
edge_mask=None,vmin=None,vmax=None,ax=None,
clip=None):
""" vmin: if nan, don't set an array at all for the edges
clip=[xmin,xmax,ymin,ymax]: additionally mask edges which are not within the given rectangle
edge_values: defaults to the edge marker.
"""
if ax is None:
ax = gca()
if self.Ncells() == 0:
voronoi = False
if voronoi:
self.vor_plot = ax.plot(self.vcenters()[:,0],self.vcenters()[:,1],".")
if self.Nedges() == 0:
return
if edge_mask is None:
if not all_cells:
edge_mask = self.edges[:,4] < 0
else:
edge_mask = self.edges[:,0] >= 0 # ones( self.edges[:,2].shape ) == 1
if sum(edge_mask) == 0:
return
# g.edges[:,:2] pulls out every edge, and just the endpoint
# indices.
# indexing points by this maps the indices to points
# which then has the z-values sliced out
segments = self.points[self.edges[edge_mask,:2]]
clip=clip or self.default_clip
# Apply clip only to valid edges
if clip is not None:
# segments is Nedges * {a,b} * {x,y}
points_visible = (segments[...,0] >= clip[0]) & (segments[...,0]<=clip[1]) \
& (segments[...,1] >= clip[2]) & (segments[...,1]<=clip[3])
# so now clip is a bool array of length Nedges
clip = any( points_visible, axis=1)
segments = segments[clip,...]
line_coll = LineCollection(segments,**line_collection_args)
if vmin is not None and isnan(vmin):
print "Skipping the edge array"
else:
# allow for coloring the edges
if edge_values is None:
edge_values = self.edges[:,2]
edge_values = edge_values[edge_mask]
if clip is not None:
edge_values = edge_values[clip]
line_coll.set_array(edge_values)
if vmin:
line_coll.norm.vmin = vmin
if vmax:
line_coll.norm.vmax = vmax
ax.add_collection(line_coll)
self.edge_collection = line_coll
ax.axis('equal')
if not voronoi:
# the collections themselves do not automatically set the
# bounds of the axis
ax.axis(self.bounds())
return line_coll
def plot_scalar(self,scalar,pdata=None,clip=None,ax=None,norm=None,cmap=None):
""" Plot the scalar assuming it sits at the center of the
cells (i.e. use the voronoi centers)
scalar should be a 1d array, with length the same as the
number of cells
to mask out values, set scalar to nan
"""
if ax is None:
ax = gca()
if not pdata:
# create a numpy array for all of the segments:
# each segment has 4 points so that it closes the triangle
segments = zeros((self.Ncells(),4,2),float64)
for i in range(self.Ncells()):
for j in range(4):
segments[i,j,:] = self.points[self.cells[i,j%3]]
clip=clip or self.default_clip
if clip:
good_points = (self.points[:,0] > clip[0]) & \
(self.points[:,0] < clip[1]) & \
(self.points[:,1] > clip[2]) & \
(self.points[:,1] < clip[3])
# how to map that onto segments?
good_verts = good_points[self.cells]
good_cells = good_verts.sum(axis=1) == 3
segments = segments[good_cells]
scalar = scalar[good_cells]
if len(scalar) == 0:
return None
mask = isnan(scalar)
if any(mask):
segments = segments[~mask]
scalar = scalar[~mask]
if len(scalar) == 0:
return None
patch_coll = PolyCollection(segments,edgecolors='None',antialiaseds=0,norm=norm,cmap=cmap)
# is this sufficient for coloring? YES
patch_coll.set_array(scalar)
pdata = patch_coll
ax.add_collection(patch_coll)
ax.axis('equal')
ax.axis(self.bounds())
else:
pdata.set_array(scalar)
draw()
return pdata
def animate_scalar(self,scalar_frames,post_proc=None):
clf() # clear figure, to get rid of colorbar, too
vmin = scalar_frames.min()
vmax = scalar_frames.max()
print "Max,min: ",vmax,vmin
pdata = self.plot_scalar(scalar_frames[0])
title("Step 0")
pdata.norm.vmin = vmin
pdata.norm.vmax = vmax
colorbar(pdata)
show()
for i in range(1,scalar_frames.shape[0]):
title("Step %d"%i)
self.plot_scalar(scalar_frames[i],pdata)
if post_proc:
post_proc()
def scalar_contour(self,scalar,V=10,smooth=True):
""" Generate a collection of edges showing the contours of a
cell-centered scalar.
V: either an int giving the number of contours which will be
evenly spaced over the range of the scalar, or a sequence
giving the exact contour values.
smooth: control whether one pass of 3-point smoothing is
applied.
returns a LineCollection
"""
if type(V) == int:
V = linspace( nanmin(scalar),nanmax(scalar),V )
disc = searchsorted(V,scalar) # nan=>last index
nc1 = self.edges[:,3]
nc2 = self.edges[:,4].copy()
nc2[nc2<0] = nc1[nc2<0]
to_show = (disc[nc1]!=disc[nc2]) & isfinite(scalar[nc1]+scalar[nc2])
segs = self.points[ self.edges[to_show,:2], :]
joined_segs = join_features.merge_lines(segments=segs)
# Smooth those out some...
def smooth_seg(seg):
seg = seg.copy()
seg[1:-1,:] = (2*seg[1:-1,:] + seg[0:-2,:] + seg[2:,:])/4.0
return seg
if smooth:
simple_segs = [smooth_seg(seg) for seg in joined_segs]
else:
simple_segs = joined_segs
ecoll = LineCollection(simple_segs)
ecoll.set_edgecolor('k')
return ecoll
def bounds(self):
valid = isfinite(self.points[:,0])
return (self.points[valid,0].min(),self.points[valid,0].max(),
self.points[valid,1].min(),self.points[valid,1].max() )
def vcenters(self):
if self._vcenters is None:
p1 = self.points[self.cells[:,0]]
p2 = self.points[self.cells[:,1]]
p3 = self.points[self.cells[:,2]]
self._vcenters = circumcenter(p1,p2,p3)
return self._vcenters
def faces(self,i):
# returns an 3 element array giving the edge indices for the
# cell i
# the 0th edge goes from the 0th vertex to the 1st.
f = array([-1,-1,-1])
for nf in range(3):
f[nf] = self.find_edge( (self.cells[i,nf],self.cells[i,(nf+1)%3]) )
return f
def write_cells_shp(self,shpname,cell_mask=None,overwrite=False,fields=None):
"""
fields: a structure array of fields to write out - see wkb2shp
"""
import wkb2shp
if cell_mask is None:
cell_mask = slice(None)
polys = self.points[self.cells[cell_mask,:],:]
tris = [geometry.Polygon(p) for p in polys]
wkb2shp.wkb2shp(shpname,tris,overwrite=overwrite,fields=fields[cell_mask])
def write_shp(self,shpname,only_boundaries=1,edge_mask=None,overwrite=0):
""" Write some portion of the grid to a shapefile.
If only_boundaries is specified, write out only the edges that have non-zero marker
For starters, this writes every edge as a separate feature, but at some point it
may make polygons out of the edges.
"""
if edge_mask is None:
if only_boundaries:
edge_mask = (self.edges[:,2] != 0)
else:
edge_mask = (self.edges[:,0]>=0)
if overwrite and os.path.exists(shpname):
# hopefully it's enough to just remove the .shp, and not worry about
# the other files.
os.unlink(shpname)
# Create the shapefile
drv = ogr.GetDriverByName('ESRI Shapefile')
ods = drv.CreateDataSource(shpname)
srs = osr.SpatialReference()
srs.SetFromUserInput('EPSG:26910')
olayer = ods.CreateLayer(shpname,
srs=srs,
geom_type=ogr.wkbLineString)
edge_field = olayer.CreateField(ogr.FieldDefn('edge',ogr.OFTInteger))
marker_field = olayer.CreateField(ogr.FieldDefn('marker',ogr.OFTInteger))
fdef = olayer.GetLayerDefn()
for j in nonzero(edge_mask)[0]:
e = self.edges[j]
geo = geometry.LineString( [self.points[e[0]], self.points[e[1]]] )
new_feat_geom = ogr.CreateGeometryFromWkb( geo.wkb )
feat = ogr.Feature(fdef)
feat.SetGeometryDirectly(new_feat_geom)
# force to python int, as numpy types upset swig.
feat.SetField('edge',int(j))
feat.SetField('marker',int(e[2]))
olayer.CreateFeature(feat)
olayer.SyncToDisk()
def write_contours_shp(self,shpname,cell_depths,V,overwrite=False):
""" like write_shp, but collects edges for each depth in V.
"""
# because that's how suntans reads depth - no sign
V = abs(V)
cell_depths = abs(cell_depths)
if overwrite and os.path.exists(shpname):
os.unlink(shpname)
# Create the shapefile
drv = ogr.GetDriverByName('ESRI Shapefile')
ods = drv.CreateDataSource(shpname)
srs = osr.SpatialReference()
srs.SetFromUserInput('EPSG:26910')
olayer = ods.CreateLayer(shpname,
srs=srs,
geom_type=ogr.wkbLineString)
# create some fields:
olayer.CreateField(ogr.FieldDefn('depth',ogr.OFTReal))
olayer.CreateField(ogr.FieldDefn('edge',ogr.OFTInteger))
fdef = olayer.GetLayerDefn()
internal = (self.edges[:,4] >= 0)
for v in V:
print "Finding contour edges for depth=%f"%v
# These could be tweaked a little bit to get closed polygons
on_contour = (cell_depths[self.edges[:,3]] <= v ) != (cell_depths[self.edges[:,4]] <= v)
edge_mask = on_contour & internal
for j in nonzero(edge_mask)[0]:
e = self.edges[j]
geo = geometry.LineString( [self.points[e[0]], self.points[e[1]]] )
new_feat_geom = ogr.CreateGeometryFromWkb( geo.wkb )
feat = ogr.Feature(fdef)
feat.SetGeometryDirectly(new_feat_geom)
feat.SetField('depth',float(v))
feat.SetField('edge',int(j))
olayer.CreateFeature(feat)
olayer.SyncToDisk()
def carve_thalweg(self,depths,threshold,start,mode,max_count=None):
""" Ensures that there is a path of cells from the given start edge
to deep water with all cells of at least threshold depth.
start: edge index
depths and threshold should all be as *soundings* - i.e. positive
mode is 'cells' - cell-centered depths
or 'edges' - edge-centered depths
max_count: max number of cells/edges to deepen along the path (starting
at start).
Modifies depths in place.
"""
c = self.edges[start,3]
# approach: breadth-first search for a cell that is deep enough.
# Track who's been visited -
# this records the index of the cell from which this cell was visited.
visitors = -1 * ones(self.Ncells(),int32)
# Initialize the list of cells to visit
stack = [c]
visitors[c] = c # sentinel - visits itself
gold = None
try:
while 1:
new_stack = []
for c in stack:
# find the neighbors of this cell:
edges = self.cell2edges(c)
for e in edges:
if mode == 'edges' and depths[e] > threshold:
gold = c
raise StopIteration
# find the neighbor cell
if self.edges[e,3] == c:
nc = self.edges[e,4]
else:
nc = self.edges[e,3]
# have the neighbor, but should we visit it?
if nc < 0 or visitors[nc] >= 0:
continue
visitors[nc] = c
new_stack.append(nc)
if mode == 'cells' and depths[nc] > threshold:
gold = nc
raise StopIteration
# everyone at this level has been visited and we haven't hit gold.
# on to the next ring of neighbors:
stack=new_stack
except StopIteration:
pass
# then trace back and update all the depths that are too small
c = gold
along_the_path = []
while c != visitors[c]:
if mode == 'edges':
e = self.cells2edge(c,visitors[c])
along_the_path.append(e)
#if depths[e] < threshold:
# depths[e] = threshold
c=visitors[c]
if mode == 'cells':
along_the_path.append(c)
#if depths[c] < threshold:
# depths[c] = threshold
if max_count is None or max_count > len(along_the_path):
max_count = len(along_the_path)
for item in along_the_path[-max_count:]:
if depths[item] < threshold:
depths[item] = threshold
# Take care of starting edge
if mode == 'edges' and depths[start] < threshold:
depths[start] = threshold
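    # A minimal usage sketch (hypothetical names): assuming `g` is a loaded
    # TriGrid and `depths` holds positive cell soundings, carving a channel of
    # at least 2m from boundary edge `e0` toward deep water might look like:
    #   g = TriGrid(suntans_path='rundata')        # hypothetical path
    #   depths = abs(loadtxt('cell_depths.txt'))   # hypothetical input
    #   g.carve_thalweg(depths, threshold=2.0, start=e0, mode='cells')
    # depths is modified in place along the traced path.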
def write_mat(self,fn,order='ccw'):
from scipy.io import savemat
        if order == 'ccw':
            cslice=slice(None)
        elif order == 'cw':
            cslice=slice(None,None,-1)
else:
raise Exception("Bad order: %s"%order)
d={}
d['points'] = self.points
# to 1-based
d['cells'] = 1+self.cells[:,cslice]
d['edges'] = 1+self.edges[:,:2]
d['edge_to_cells'] = 1+self.edges[:,3:5]
d['edge_mark']=self.edges[:,2]
d['cell_circumcenters']=self.vcenters()
d['readme']="\n".join(["points: [Npoints,2] node locations",
"cells: [Ncells,3] - one-based index into points, CCW order",
"edges: [Nedges,2] - one-based nodes for each edge",
"edge_to_cells: [Nedges,2] - left/right cell index for each edge.",
" right_cell=-1 if on the border",
"edge_mark: [Nedges] - 0 for internal edge, 1 for boundary",
"cell_circumcenters: [Ncells,2] x/y location of circumcenter (i.e. Delaunay center)"])
savemat(fn,d)
def write_suntans(self,pathname):
""" create cells.dat, edges.dat and points.dat
from the TriGrid instance, all in the directory
specified by pathname
"""
if not os.path.exists(pathname):
print "Creating folder ",pathname
os.makedirs(pathname)
# check for missing BCs
missing_bcs = (self.edges[:,2]==0) & (self.edges[:,4]<0)
n_missing = missing_bcs.sum()
if n_missing > 0:
print "WARNING: %d edges are on the boundary but have marker==0"%n_missing
print "Assuming they are closed boundaries!"
# make a copy so that somebody can plot the bad cells afterwards
# with plot_missing_bcs()
my_edges = self.edges.copy()
my_edges[missing_bcs,2] = 1
else:
my_edges = self.edges
cells_fp = open(pathname + "/cells.dat","w")
edges_fp = open(pathname + "/edges.dat","w")
points_fp= open(pathname + "/points.dat","w")
for i in range(self.Npoints()):
points_fp.write("%.5f %.5f 0\n"%(self.points[i,0],self.points[i,1]))
points_fp.close()
# probably this can be done via the edges array
for i in range(self.Ncells()):
# each line in the cell output is
# x, y of voronoi center (I think)
# zero-based point-indices x 3
# zero-based ?cell? indices x 3, for neighbors?
# find the neighbors:
# the first neighbor: need another cell that has
# both self.cells[i,0] and self.cells[i,1] in its
# list.
my_set = set([i])
n = [-1,-1,-1]
for j in 0,1,2:
adj1 = self.pnt2cells(self.cells[i,j])
adj2 = self.pnt2cells(self.cells[i,(j+1)%3])
neighbor = adj1.intersection(adj2).difference(my_set)
if len(neighbor) == 1:
n[j] = neighbor.pop()
cells_fp.write("%.5f %.5f %i %i %i %i %i %i\n"%(
self.vcenters()[i,0],self.vcenters()[i,1],
self.cells[i,0],self.cells[i,1],self.cells[i,2],
n[0],n[1],n[2]))
cells_fp.close()
for edge in my_edges:
# point_id, point_id, edge_type, cell, cell
edges_fp.write("%i %i %i %i %i\n"%(
edge[0],edge[1],
edge[2],
edge[3],edge[4]))
edges_fp.close()
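    # Hedged usage sketch: writing the grid out for suntans is a single call;
    # the directory is created if needed and unmarked boundary edges are
    # assumed closed (see the warning above).
    #   g.write_suntans('rundata')   # hypothetical output folder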
def find_edge(self,nodes):
# this way is slow - most of the time in the array ops
# try:
# e = intersect1d( unique(self.pnt2edges(nodes[0])),
# unique(self.pnt2edges(nodes[1])) )[0]
# except IndexError:
# raise NoSuchEdgeError,str(nodes)
# return e
el0 = self.pnt2edges(nodes[0])
el1 = self.pnt2edges(nodes[1])
for e in el0:
if e in el1:
return e
raise NoSuchEdgeError,str(nodes)
def find_cell(self,nodes):
""" return the cell (if any) that is made up of the given nodes
depends on pnt2cells
"""
try:
cells_a = self.pnt2cells(nodes[0])
cells_b = self.pnt2cells(nodes[1])
cells_c = self.pnt2cells(nodes[2])
c = cells_a.intersection(cells_b).intersection(cells_c)
if len(c) == 0:
raise NoSuchCellError()
elif len(c) > 1:
raise Exception,"Nodes %s mapped to cells %s"%(nodes,c)
else:
return list(c)[0]
except KeyError:
raise NoSuchCellError()
def cell_neighbors(self,cell_id,adjacent_only=0):
""" return array of cell_ids for neighbors of this
cell. here neighbors are defined by sharing a vertex,
not just sharing an edge, unless adjacent_only is specified.
(in which case it only returns cells sharing an edge)
"""
if not adjacent_only:
neighbors = [list(self.pnt2cells(p)) for p in self.cells[cell_id]]
return unique(reduce(lambda x,y: x+y,neighbors))
else:
nbrs = []
for nc1,nc2 in self.edges[self.cell2edges(cell_id),3:5]:
if nc1 != cell_id and nc1 >= 0:
nbrs.append(nc1)
if nc2 != cell_id and nc2 >= 0:
nbrs.append(nc2)
return array(nbrs)
def make_edges_from_cells(self):
# iterate over cells, and for each cell, if it's index
# is smaller than a neighbor or if no neighbor exists,
# write an edge record
edges = []
default_marker = 0
# this will get built on demand later.
self._pnt2edges = None
for i in range(self.Ncells()):
# find the neighbors:
# the first neighbor: need another cell that has
# both self.cells[i,0] and self.cells[i,1] in its
# list.
my_set = set([i])
n = [-1,-1,-1]
for j in 0,1,2:
pnt_a = self.cells[i,j]
pnt_b = self.cells[i,(j+1)%3]
adj1 = self.pnt2cells(pnt_a) # cells that use pnt_a
adj2 = self.pnt2cells(pnt_b) # cells that use pnt_b
# the intersection is us and our neighbor
# so difference out ourselves...
neighbor = adj1.intersection(adj2).difference(my_set)
                # and maybe we get a neighbor, maybe not (we're on the boundary)
if len(neighbor) == 1:
n = neighbor.pop()
else:
n = -1
if n==-1 or i<n:
# we get to add the edge:
edges.append((pnt_a,
pnt_b,
default_marker,
i,n))
self.edges = array(edges,int32)
def verify_bc(self,do_plot=True):
""" check to make sure that all grid boundaries have a BC set
"""
# point_i, point_i, marker, cell_i, cell_i
# marker: 0=> internal,1=> closed, 3=> open
# make sure that any internal boundary has a second cell index
# assumes that all edges have the first cell index != -1
bad_edges = find( (self.edges[:,2]==0) & (self.edges[:,4]==-1 ) )
if do_plot:
for e in bad_edges:
bad_points = self.edges[e,0:2]
plot(self.points[bad_points,0],
self.points[bad_points,1],'r-o')
if len(bad_edges) > 0:
print "BAD: there are %d edges without BC that have only 1 cell"%len(bad_edges)
return 0
else:
return 1
def cell2edges(self,cell_i):
if self.cells[cell_i,0] == -1:
raise "cell %i has been deleted"%cell_i
# return indices to the three edges for this cell:
pnts = self.cells[cell_i] # the three vertices
# the k-th edge is opposite the k-th point, like in CGAL
edges = [ self.find_edge( (pnts[(i+1)%3], pnts[(i+2)%3]) ) for i in range(3) ]
return edges
_cell_edge_map = None
def cell_edge_map(self):
""" cell2edges for the whole grid
return an integer valued [Nc,3] array, where [i,k] is the edge index
opposite point self.cells[i,k]
N.B. this is not kept up to date when modifying the grid.
"""
if self._cell_edge_map is None:
cem = zeros( (self.Ncells(),3), int32)
for i in xrange(self.Ncells()):
cem[i,:] = self.cell2edges(i)
self._cell_edge_map = cem
return self._cell_edge_map
def interp_cell_to_edge(self,F):
""" given a field [Nc,...], linearly interpolate
to edges and return [Ne,...] field.
"""
ec = self.edge_centers()
vc = self.vcenters()
        nc1 = self.edges[:,3]
        # copy so that patching boundary edges to use their one valid neighbor
        # doesn't overwrite the grid's edge array in place
        nc2 = self.edges[:,4].copy()
        nc2[nc2<0] = nc1[nc2<0]
df1 = dist(ec,vc[nc1])
df2 = dist(ec,vc[nc2])
nc1_weight = df2/(df1+df2)
if F.ndim == 2:
nc1_weight = nc1_weight[:,newaxis]
return nc1_weight * F[nc1] + (1-nc1_weight) * F[nc2]
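    # Illustrative sketch (assumed names): interpolating a per-cell scalar such
    # as depth onto edges uses distance-based linear weighting between the two
    # neighboring cell centers; boundary edges just take their one neighbor.
    #   edge_depth = g.interp_cell_to_edge(cell_depth)   # cell_depth: [Ncells]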
def interp_cell_to_node(self,F):
vals=zeros(self.Npoints(),'f8')
for i in range(self.Npoints()):
cells=list(self.pnt2cells(i))
vals[i]=F[cells].mean()
return vals
def cell_divergence_of_edge_flux(self,edge_flux):
""" edge_flux is assumed to be depth integrated, but not
horizontally integrated - so something like watts per meter
"""
cell_to_edges = self.cell_edge_map() # slow! 30s
ec = self.edge_centers()
vc = self.vcenters()
dxy = ec[cell_to_edges] - vc[:,newaxis,:]
dxy_norm = dist(dxy,0*dxy)
# should be outward normals, [Nc,3 edges,{x,y}]
nxy = dxy / dxy_norm[:,:,newaxis]
# got depth from the start, but need edge length
edge_len = dist( self.points[self.edges[:,0]], self.points[self.edges[:,1]])
flux_divergence = sum(sum(nxy * (edge_len[:,newaxis] * edge_flux)[cell_to_edges],
axis=1),axis=1) # maybe...
return flux_divergence / self.areas()
def smooth_scalar(self,cell_value):
"""
simple method for smoothing a scalar field. note that this is not
conservative of anything! and the degree of smoothing is per cell, not
per area, so results may be misleading.
it does take care not to corrupt valid values with nans during the
smoothing
"""
from scipy.stats import nanmean
        # use both cell neighbors of each edge; boundary edges fall back to
        # their single valid neighbor. Work on a copy so self.edges is untouched.
        nc12 = self.edges[:,3:5].copy()
        boundary = nc12[:,1]<0
        nc12[boundary,1] = nc12[boundary,0]
edge_mean = nanmean(cell_value[nc12],axis=1)
# 0.5*(cell_value[nc1] + cell_value[nc2])
# new_values = edge_mean[self.cell_edge_map()].mean(axis=1)
new_values = nanmean(edge_mean[self.cell_edge_map()],axis=1)
# but don't turn nan values into non-nan values
new_values[isnan(cell_value)]=nan
return new_values
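    # Hedged example: the smoothing is mild, so it is typically applied
    # iteratively; each pass averages cell values through the shared edges.
    #   smoothed = cell_depths               # hypothetical input field
    #   for _ in range(5):                   # number of passes is a free choice
    #       smoothed = g.smooth_scalar(smoothed)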
def cells2edge(self,nc1,nc2):
e1 = self.cell2edges(nc1)
e2 = self.cell2edges(nc2)
for e in e1:
if e in e2:
return e
raise Exception,"Cells %d and %d don't share an edge"%(nc1,nc2)
def build_index(self):
if self.index is None:
# assemble points into list of (id, [x x y y], None)
if self.verbose > 1:
print "building point index"
# old rtree required that stream inputs have non-interleaved coordinates,
# but new rtree allows for interleaved coordinates all the time.
# best solution probably to specify interleaved=False
tuples = [(i,self.points[i,xxyy],None) for i in range(self.Npoints()) if isfinite(self.points[i,0]) ]
self.index = Rtree(tuples,interleaved=False)
if self.verbose > 1:
print "done"
def build_edge_index(self):
if self.edge_index is None:
print "building edge index"
ec = self.edge_centers()
tuples = [(i,ec[i,xxyy],None) for i in range(self.Nedges())]
self.edge_index = Rtree(tuples,interleaved=False)
print "done"
def closest_point(self,p,count=1,boundary=0):
""" Returns the count closest nodes to p
boundary=1: only choose nodes on the boundary.
"""
if boundary:
# print "Searching for nearby boundary point"
# this is slow, but I'm too lazy to add any sort of index specific to
# boundary nodes. Note that this will include interprocessor boundary
# nodes, too.
boundary_nodes = unique( self.edges[self.edges[:,2]>0,:2] )
dists = sum( (p - self.points[boundary_nodes])**2, axis=1)
order = argsort(dists)
closest = boundary_nodes[ order[:count] ]
# print " done with boundary node search"
if count == 1:
return closest[0]
else:
return closest
else:
if self.index is None:
self.build_index()
p = array(p)
# returns the index of the grid point closest to the given point:
hits = self.index.nearest( p[xxyy], count)
# newer versions of rtree return a generator:
if isinstance( hits, types.GeneratorType):
# so translate that into a list like we used to get.
hits = [hits.next() for i in range(count)]
if count > 1:
return hits
else:
return hits[0]
def closest_edge(self,p):
if self.edge_index is None:
self.build_edge_index()
hits = self.edge_index.nearest( p[xxyy], 1)
# newer versions of rtree return a generator:
if isinstance( hits, types.GeneratorType):
# so translate that into a list like we used to get.
return hits.next()
else:
return hits[0]
def closest_cell(self,p,full=0,inside=False):
"""
full=0: return None if the closest *point* is not in a cell on this subdomain
full=1: exhaustively search all cells, even if the nearest point is not on this subdomain
inside: require that the returned cell contains p, otherwise return None
"""
# rather than carry around another index, reuse the point index
i = self.closest_point(p)
try:
cells = list( self.pnt2cells(i) )
except KeyError:
if not full:
return None
else:
print "This must be on a subdomain. The best point wasn't in one of our cells"
cells = range(self.Ncells())
if inside:
pnt = geometry.Point(p[0],p[1])
for c in cells:
tri = geometry.Polygon(self.points[self.cells[c]])
if tri.contains(pnt):
return c
return None
else:
cell_centers = self.vcenters()[cells]
dists = ((p-cell_centers)**2).sum(axis=1)
chosen = cells[argmin(dists)]
dist = sqrt( ((p-self.vcenters()[chosen])**2).sum() )
# print "Closest cell was %f [m] away"%dist
return chosen
def set_edge_markers(self,pnt1,pnt2,marker):
""" Find the nodes closest to each of the two points,
Search for the shortest path between them on the boundary.
Set all of those edges' markers to marker
"""
n1 = self.closest_point(pnt1)
n2 = self.closest_point(pnt2)
path = self.shortest_path(n1,n2,boundary_only=1)
for i in range(len(path)-1):
e = self.find_edge( path[i:i+2] )
self.edges[e,2] = marker
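    # Minimal sketch with made-up coordinates: marking the stretch of boundary
    # between two points as an open (type 3) boundary walks the shortest
    # boundary path between the nodes nearest each point and stamps every edge.
    #   g.set_edge_markers((551000., 4183000.), (552500., 4184200.), marker=3)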
def shortest_path(self,n1,n2,boundary_only=0,max_cost = inf):
""" dijkstra on the edge graph from n1 to n2
boundary_only: limit search to edges on the boundary (have
a -1 for cell2)
"""
queue = pq.priorityDictionary()
queue[n1] = 0
done = {}
while 1:
# find the queue-member with the lowest cost:
if len(queue)==0:
return None # no way to get there from here.
best = queue.smallest()
best_cost = queue[best]
if best_cost > max_cost:
print "Too far"
return None
del queue[best]
done[best] = best_cost
if best == n2:
# print "Found the ending point"
break
# figure out its neighbors
# This used to use cells, but this query is valid even when there are no cells,
# so don't rely on cells.
#cells = list(self.pnt2cells(best))
#all_points = unique( self.cells[cells] )
edges = self.pnt2edges(best)
all_points = unique( self.edges[edges,:2] )
for p in all_points:
if done.has_key(p):
# both for p and for points that we've already done
continue
if boundary_only:
e = self.find_edge( (best,p) )
if self.edges[e,4] != BOUNDARY:
continue
dist = sqrt( ((self.points[p] - self.points[best])**2).sum() )
new_cost = best_cost + dist
if not queue.has_key(p):
queue[p] = inf
if queue[p] > new_cost:
queue[p] = new_cost
# reconstruct the path:
path = [n2]
while 1:
p = path[-1]
if p == n1:
break
# figure out its neighbors
edges = self.pnt2edges(p)
all_points = unique( self.edges[edges,:2] )
found_prev = 0
for nbr in all_points:
if nbr == p or not done.has_key(nbr):
continue
dist = sqrt( ((self.points[p] - self.points[nbr])**2).sum() )
if done[p] == done[nbr] + dist:
path.append(nbr)
found_prev = 1
break
if not found_prev:
return None
return array( path[::-1] )
def cells_on_line(self,xxyy):
""" Return cells intersecting the given line segment
cells are found based on having vertices which straddle
the line, and cell centers which are within the segment's
extent
"""
m=array([ [xxyy[0],xxyy[2],1],
[xxyy[1],xxyy[3],1],
[1,1,1] ])
        b=array([0,0,abs(array(xxyy)).mean()])
line_eq=solve(m,b)
hom_points=concatenate( (self.points,ones((self.Npoints(),1))),axis=1)
pnt_above=dot(hom_points,line_eq)>0
cell_sum=sum(pnt_above[self.cells],axis=1)
straddle=nonzero((cell_sum>0)&(cell_sum<3))[0]
# further limit that to the lateral range of the transect
A=array([xxyy[0],xxyy[2]])
B=array([xxyy[1],xxyy[3]])
vec=B-A
d_min=0
d_max=norm(vec)
vec/=d_max
straddle_dists=(vec[None,:]*(self.vcenters()[straddle]-A)).sum(axis=1)
on_line=(straddle_dists>=d_min)&(straddle_dists<=d_max)
cells_on_line=straddle[on_line]
return cells_on_line
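    # Usage sketch (hypothetical transect): note the segment is packed as
    # [x1,x2,y1,y2], matching the homogeneous line fit above.
    #   transect_cells = g.cells_on_line([x_start, x_end, y_start, y_end])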
### graph modification api calls
def delete_node_and_merge(self,n):
""" For a degree 2 node, remove it and make one edge out its two edges.
this used to be in paver, but I don't think there is any reason it can't
be here in trigrid.
"""
edges = self.pnt2edges(n)
if self.verbose > 1:
print "Deleting node %d, with edges %s"%(n,edges)
if len(edges) == 2:
if self.verbose > 1:
print " deleting node %d, will merge edges %d and %d"%(n,edges[0],edges[1])
e = self.merge_edges( edges[0], edges[1] )
elif len(edges) != 0:
print "Trying to delete node",n
annotate("del",self.points[n])
print "Edges are:",self.edges[edges]
raise Exception,"Can't delete node with %d edges"%len(edges)
edges = self.pnt2edges(n)
if len(edges) != 0:
print "Should have removed all edges to node %d, but there are still some"%n
self.delete_node(n)
return e
def unmerge_edges(self,e1,e2,e1data,e2data):
self.edges[e1] = e1data
self.edges[e2] = e2data
# too lazy to do this right now, so to be safe just kill it
self._pnt2edges = None
def merge_edges(self,e1,e2):
""" returns the id of the new edge, which for now will always be one of e1 and e2
        (and the other will have been deleted)
"""
if self.verbose > 1:
print "Merging edges %d %d"%(e1,e2)
print " edge %d: nodes %d %d"%(e1,self.edges[e1,0],self.edges[e1,1])
print " edge %d: nodes %d %d"%(e2,self.edges[e2,0],self.edges[e2,1])
B = intersect1d( self.edges[e1,:2], self.edges[e2,:2] )[0]
# try to keep ordering the same (not sure if this is necessary)
if self.edges[e1,0] == B:
e1,e2 = e2,e1
# push the operation with the re-ordered edge nodes, so that we know (i.e.
# live_dt knows) which of the edges is current, and which is being undeleted.
self.push_op(self.unmerge_edges, e1, e2, self.edges[e1].copy(), self.edges[e2].copy() )
# pick C from e2
if self.edges[e2,0] == B:
C = self.edges[e2,1]
else:
C = self.edges[e2,0]
if self.edges[e1,0] == B:
self.edges[e1,0] = C
A = self.edges[e1,1]
else:
self.edges[e1,1] = C
A = self.edges[e1,0]
# print " nodes are %d %d %d"%(A,B,C)
# this removes e2 from _pnt2edges for B & C
# because of mucking with the edge data, better to handle the
# entire rollback in merge_edges
self.delete_edge(e2,rollback=0)
# fix up edge lookup tables:
if self._pnt2edges is not None:
self._pnt2edges[C].append(e1)
# B is still listed for e1
b_edges = self._pnt2edges[B]
if b_edges != [e1]:
print "Merging edges. Remaining pnt2edges[B=%d] = "%B,b_edges
print "is not equal to e1 = ",[e1]
self._pnt2edges[B] = []
# and callbacks:
self.updated_edge(e1)
return e1
def undelete_node(self,i,p):
self.points[i] = p
if self.index is not None:
self.index.insert(i, self.points[i,xxyy] )
def delete_node(self,i,remove_edges=1):
if self.verbose > 1:
print "delete_node: %d, remove_edges=%s"%(i,remove_edges)
if remove_edges:
# make a copy so that as delete_edge modifies
# _pnt2edges we still have the original list
nbr_edges = list(self.pnt2edges(i))
for e in nbr_edges:
self.delete_edge(e)
self.push_op(self.undelete_node,i,self.points[i].copy())
# nodes are marked as deleted by setting the x coordinate
# to NaN, and remove from index
if self.index is not None:
coords = self.points[i,xxyy]
self.index.delete(i, coords )
self.points[i,0] = nan
self.deleted_node(i)
def undelete_cell(self,c,nodes,edge_updates):
self.cells[c] = nodes
self._vcenters = None # lazy...
for e,vals in edge_updates:
self.edges[e] = vals
if self._pnt2cells is not None:
for i in nodes:
if not self._pnt2cells.has_key(i):
self._pnt2cells[i] = set()
self._pnt2cells[i].add(c)
def delete_cell(self,c,replace_with=-2,rollback=1):
""" replace_with: the value to set on edges that used to reference
this cell. -2 => leave an internal hole
-1 => create an 'island'
"""
nA,nB,nC = self.cells[c]
ab = self.find_edge([nA,nB])
bc = self.find_edge([nB,nC])
ca = self.find_edge([nC,nA])
edge_updates = [ [ab,self.edges[ab].copy()],
[bc,self.edges[bc].copy()],
[ca,self.edges[ca].copy()] ]
self.push_op(self.undelete_cell,c,self.cells[c].copy(),edge_updates)
for e in [ab,bc,ca]:
if self.edges[e,3] == c:
check = 3
elif self.edges[e,4] == c:
check = 4
else:
print "Cell: %d check on edge %d with nbrs: %d %d"%(
c,e,self.edges[e,3],self.edges[e,4])
raise Exception,"Deleting cell, but edge has no reference to it"
self.edges[e,check] = replace_with
# optional - update edge marker, and for now just assume it will
# be a land edge (other BC types are generally handled later anyway)
if replace_with == -1:
# print "Deleting cell and replace_with is",replace_with
if self.edges[e,2] == 0:
# print "Internal edge becoming a land edge"
self.edges[e,2] = LAND_EDGE
self.updated_edge(e)
self.cells[c,:] = -1
if self._vcenters is not None:
self._vcenters[c] = nan
if self._pnt2cells is not None:
for n in [nA,nB,nC]:
self._pnt2cells[n].remove(c)
self.deleted_cell(c)
def undelete_edge(self,e,e_data):
self.edges[e] = e_data
# fix up indexes:
if self._pnt2edges is not None:
for n in self.edges[e,:2]:
if not self._pnt2edges.has_key(n):
self._pnt2edges[n] = []
self._pnt2edges[n].append(e)
if self.edge_index is not None:
coords = self.edge_centers()[e][xxyy]
self.edge_index.insert(e,coords)
def delete_edge(self,e,rollback=1):
""" for now, just make it into a degenerate edge
specify rollback=0 to skip recording the undo information
"""
if self.verbose > 1:
print "Deleting edge %d:"%e
# remove any neighboring cells first
cell_nbrs = self.edges[e,3:5]
if any(cell_nbrs == -1):
replace_with = -1
else:
replace_with = -2
for c in cell_nbrs:
if c >= 0:
self.delete_cell(c,replace_with=replace_with,rollback=rollback)
# clear out indexes
if self._pnt2edges is not None:
self._pnt2edges[self.edges[e,0]].remove(e)
self._pnt2edges[self.edges[e,1]].remove(e)
if self.edge_index is not None:
coords = self.edge_centers()[e][xxyy]
self.edge_index.delete(e,coords)
if rollback:
self.push_op(self.undelete_edge,e,self.edges[e].copy())
# mark edge deleted
self.edges[e,:2] = -37
self.edges[e,2] = DELETED_EDGE # DELETED
self.edges[e,3:5] = -37
# signal to anyone who cares
self.deleted_edge(e)
def valid_edges(self):
""" returns an array of indices for valid edges - i.e. not deleted"""
return nonzero(self.edges[:,2]!=DELETED_EDGE)[0]
def split_edge(self,nodeA,nodeB,nodeC):
""" take the existing edge AC and insert node B in the middle of it
nodeA: index to node on one end of the existing edge
nodeB: (i) index to new new node in middle of edge,
(ii) tuple (coords, dict for add_node options)
may be extended to allow arbitrary options for point
nodeC: index to node on other end of existing edge
"""
e1 = self.find_edge([nodeA,nodeC])
if isinstance(nodeB,tuple):
pntB,pntBopts=nodeB
nodeB=None
else:
pntB=self.points[nodeB]
if any( self.edges[e1,3:5] >= 0 ):
print "While trying to split the edge %d (%d-%d) with node %s"%(e1,nodeA,nodeC,nodeB)
annotate(str(nodeA),self.points[nodeA])
annotate(str(nodeB),pntB)
annotate(str(nodeC),self.points[nodeC])
print "The cell neighbors of the edge are:",self.edges[e1,3:5]
raise Exception,"You can't split an edge that already has cells"
# 2011-01-29: this used to be in the opp. order - but that implies
# an invalid state
self.push_op(self.unmodify_edge,e1,self.edges[e1].copy())
# 2014-11-06: for a nodeB colinear with nodeA-nodeC, this is the
# more appropriate time to create nodeB
if nodeB is None:
nodeB=self.add_node(pntB,**pntBopts)
self.push_op(self.unadd_edge,self.Nedges())
self.edges = array_append( self.edges, self.edges[e1] )
e2 = self.Nedges() - 1
# first make the old edge from AC to AB
if self.edges[e1,0] == nodeC:
self.edges[e1,0] = nodeB
self.edges[e2,1] = nodeB
else:
self.edges[e1,1] = nodeB
self.edges[e2,0] = nodeB
# handle updates to indices
# update pnt2edges
if self._pnt2edges is not None:
# nodeA is fine.
# nodeB has to get both edges:
self._pnt2edges[nodeB] = [e1,e2]
# nodeC
i = self._pnt2edges[nodeC].index(e1)
self._pnt2edges[nodeC][i] = e2
self.updated_edge(e1)
self.created_edge(e2)
return e2
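    # Hedged examples of the two calling conventions described in the docstring:
    #   e_new = g.split_edge(nA, nB, nC)                 # nB already exists
    #   e_new = g.split_edge(nA, (midpoint, {}), nC)     # node created for you
    # where `midpoint` is an [x,y] point on segment A-C (names are assumptions).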
def unadd_edge(self,old_length):
#print "unadding edge %d"%old_length
new_e = old_length
if self._pnt2edges is not None:
for n in self.edges[new_e,:2]:
self._pnt2edges[n].remove(new_e)
self.edges = self.edges[:old_length]
def unmodify_edge(self, e, old_data):
# print "unmodifying edge %d reverting to %s"%(e,old_data)
if self._pnt2edges is not None:
a,b = self.edges[e,:2]
self._pnt2edges[a].remove(e)
self._pnt2edges[b].remove(e)
a,b = old_data[:2]
self._pnt2edges[a].append(e)
self._pnt2edges[b].append(e)
self.edges[e] = old_data
def add_edge(self,nodeA,nodeB,marker=0,cleft=-2,cright=-2,coerce_boundary=None):
""" returns the number of the edge
for cells that are marked -2, this will check to see if a new cell can
be made on that side with other unmeshed edges
"""
# print "trigrid: Adding an edge between %s and %s"%(nodeA,nodeB)
try:
e = self.find_edge([nodeA,nodeB])
raise Exception,"edge between %d and %d already exists"%(nodeA,nodeB)
except NoSuchEdgeError:
pass
# dynamic resizing for edges:
self.push_op(self.unadd_edge,len(self.edges))
self.edges = array_append( self.edges, [nodeA,nodeB,marker,cleft,cright] )
this_edge = self.Nedges()-1
edge_ab = this_edge # for consistency in the mess of code below
# print "This edge: ",this_edge
self.cells_from_last_new_edge = []
if cleft == -2 or cright == -2:
# First get any candidates, based just on connectivity
edges_from_a = self.pnt2edges(nodeA)
edges_from_b = self.pnt2edges(nodeB)
neighbors_from_a = setdiff1d( self.edges[edges_from_a,:2].ravel(), [nodeA,nodeB] )
neighbors_from_b = setdiff1d( self.edges[edges_from_b,:2].ravel(), [nodeA,nodeB] )
# nodes that are connected to both a and b
candidates = intersect1d( neighbors_from_a, neighbors_from_b )
if len(candidates) > 0:
# is there a candidate on our right?
ab = self.points[nodeB] - self.points[nodeA]
ab_left = rot(pi/2,ab)
new_cells = []
for c in candidates:
ac = self.points[c] - self.points[nodeA]
if dot(ac,ab_left) < 0: # this one is on the right of AB
# make a stand-in A & B that are in CCW order in this cell
ccwA,ccwB = nodeB,nodeA
check_cell_ab = 4 # the relevant cell for the new edge
else:
ccwA,ccwB = nodeA,nodeB
check_cell_ab = 3
edge_ac = self.find_edge((ccwA,c))
if self.edges[edge_ac,0] == ccwA:
# then the edge really is stored ac
check_cell_ac = 4
else:
check_cell_ac = 3
edge_bc = self.find_edge((ccwB,c))
if self.edges[edge_bc,0] == ccwB:
check_cell_bc = 3
else:
check_cell_bc = 4
# so now we have edge_ab, edge_ac, edge_bc as edge ids for the
# edges that make up a new cell, and corresponding check_cell_ab
# check_cell_ac and check_cell_bc that index the adj. cell that is
# facing into this new cell.
ccw_edges = [edge_ab,edge_bc,edge_ac]
check_cells = [check_cell_ab, check_cell_bc, check_cell_ac]
adj_ids = [ self.edges[e,check]
for e,check in zip(ccw_edges,check_cells) ]
adj_ids = array( adj_ids )
if any(adj_ids >= 0) and any( adj_ids != adj_ids[0]):
# bad. one edge thinks there is already a cell here, but
                        # the others don't agree.
print "During call to add_edge(nodeA=%d nodeB=%d marker=%d cleft=%d cright=%d coerce=%s"%(nodeA,
nodeB,
marker,
cleft,
cright,
coerce_boundary)
raise Exception,"cell neighbor values for new cell using point %d are inconsistent: %s"%(c,adj_ids)
elif all(adj_ids == -1):
# leave them be, no new cell, all 3 edges are external
pass
elif coerce_boundary == -1:
# no new cell - everybody gets -1
self.edges[edge_ab,check_cell_ab] = -1
self.edges[edge_ac,check_cell_ac] = -1
self.edges[edge_bc,check_cell_bc] = -1
elif all( adj_ids == -2 ) or coerce_boundary == -2:
# make new cell, everybody gets that cell id
# Create the cell and get it's id:
new_cells.append( self.add_cell([ccwA,ccwB,c]) )
# update everybody's cell markers:
self.push_op(self.unmodify_edge, edge_ac, self.edges[edge_ac].copy() )
self.push_op(self.unmodify_edge, edge_bc, self.edges[edge_bc].copy() )
self.edges[edge_ac,check_cell_ac] = new_cells[-1]
self.edges[edge_bc,check_cell_bc] = new_cells[-1]
self.edges[edge_ab,check_cell_ab] = new_cells[-1]
# extend boundary - the fun one
# only when there was an external edge that now falls inside
# the new cell => mark the *other* side of the other edges to
# -1
if any(adj_ids==-1):
for i in range(3):
if adj_ids[i] == -2:
# make its outside cell a -1
# the 7-check gives us the outside cell nbr
self.edges[ccw_edges[i],7-check_cells[i]] = -1
# go ahead and set a closed edge, too
self.edges[ccw_edges[i],2] = LAND_EDGE
else:
# as long as this edge wasn't originally -1,-1
# (which ought to be illegal), it's safe to say
# that it is now internal
self.edges[ccw_edges[i],2] = 0 # internal
# either way, let people know that markers have changed,
# but wait until later to signal on the new edge since
# it is not in the indices yet
if ccw_edges[i] != edge_ab:
self.updated_edge(ccw_edges[i])
self.cells_from_last_new_edge = new_cells
# update pnt2edges
if self._pnt2edges is not None:
for n in [nodeA,nodeB]:
if not self._pnt2edges.has_key(n):
self._pnt2edges[n] = []
# print "Adding edge %d to list for node %d"%(this_edge,n)
self._pnt2edges[n].append(this_edge)
self.created_edge(this_edge)
return this_edge
def unadd_node(self,old_length):
if self.index is not None:
curr_len = len(self.points)
for i in range(old_length,curr_len):
coords = self.points[i,xxyy]
self.index.delete(i, coords )
self.points = self.points[:old_length]
def add_node(self,P):
P = P[:2]
self.push_op(self.unadd_node,len(self.points))
self.points = array_append( self.points, P )
new_i = self.Npoints() - 1
if self.index is not None:
# print "Adding new node %d to index at "%new_i,self.points[new_i,xxyy]
self.index.insert(new_i, self.points[new_i,xxyy] )
self.created_node(new_i)
return new_i
def unadd_cell(self,old_length):
# remove entries from _pnt2cells
# the cell that was added is at the end:
if self._pnt2cells is not None:
new_c = old_length
for n in self.cells[new_c]:
self._pnt2cells[n].remove(new_c)
self.cells = self.cells[:old_length]
def add_cell(self,c):
self.push_op(self.unadd_cell,len(self.cells))
c = array(c,int32)
i = array([0,1,2])
ip = array([1,2,0])
xi = self.points[c[i],0]
yi = self.points[c[i],1]
xip = self.points[c[ip],0]
yip = self.points[c[ip],1]
A = 0.5 * (xi*yip-xip*yi).sum()
if A < 0:
print "WARNING: attempt to add CW cell. Reversing"
c = c[::-1]
# self.cells = concatenate( (self.cells, [c]) )
self.cells = array_append( self.cells, c )
self._vcenters = None
this_cell = self.Ncells() - 1
if self._pnt2cells is not None: # could be smarter and actually update.
for i in c:
if not self._pnt2cells.has_key(i):
self._pnt2cells[i] = set()
self._pnt2cells[i].add(this_cell)
self.created_cell(this_cell)
return this_cell
def edges_to_rings(self, edgemask=None, ccw=1):
""" using only the edges for which edgemask is true,
construct rings. if edgemask is not given, use all of the
current edges
        if ccw is 1, only non-intersecting ccw rings will be returned
        if ccw is 0, only non-intersecting cw rings will be returned
"""
if edgemask is not None:
edges = self.edges[edgemask,:2]
masked_grid = TriGrid(points=self.points,edges=edges)
return masked_grid.edges_to_rings(edgemask=None)
# remember which edges have already been assigned to a ring
edges_used = zeros( self.Nedges(), int8 )
rings = []
for start_e in range(self.Nedges()):
if edges_used[start_e]:
continue
# start tracing with the given edge -
# it's hard to know beforehand which side of this edge is facing into
# the domain, so start with the assumption that it obeys our convention
# that going from edge[i,0] to edge[i,1] the interior is to the left
# once a ring has been constructed, check to see if it has negative area
# in which case we repeat the process with the opposite ordering.
# one problem, though, is that interior rings really should have negative
# area, since they will become holes.
# at least the one with the largest area is correct.
# Then any that are inside it should have negative areas...
# what if we found all rings with positive area and all with negative
# area. then we'd have all the information ready for choosing who is
# inside whom, and which orientation is correct?
failed_edges_used1 = None
failed_edges_used2 = None
for flip in [0,1]:
e = start_e
edges_used[e] = 1 # tentatively used.
a,b = self.edges[e,:2]
if flip:
a,b = b,a
if self.verbose > 1:
print "Starting ring trace with nodes ",a,b
ring = [a,b] # stores node indices
node_count = 1
while 1:
node_count += 1
# used to be node_count > self.Npoints(), but since we step
# one extra bit around the circle, then go back and remove one
# node, I think it should be 1+.
if node_count > 1+self.Npoints():
# debug
self.plot()
pnts = self.points[ring]
plot(pnts[:,0],pnts[:,1],'ro')
# /debug
raise Exception,"Traced too far. Something is wrong. bailing"
b_edges = self.pnt2edges(b)
if len(b_edges) == 2:
# easy case - one other edge leaves.
# new edge e
if b_edges[0] == e:
e = b_edges[1]
else:
e = b_edges[0]
# # setdiff1d isn't very fast...
# e = setdiff1d(b_edges,[e])[0]
else:
# calculate angles for all the edges, CCW relative to the
# x-axis (atan2 convention)
angles = []
for next_e in b_edges:
c = setdiff1d(self.edges[next_e,:2],[b])[0]
d = self.points[c] - self.points[b]
angles.append( arctan2(d[1],d[0]) )
angles = array(angles)
e_idx = b_edges.index(e)
e_angle = angles[e_idx]
angles = (angles-e_angle) % (2*pi)
next_idx = argsort(angles)[-1]
e = b_edges[next_idx]
# # setdiff1d is slow. do this manually
# c = setdiff1d(self.edges[e,:2],[b])[0]
if self.edges[e,0] == b:
c = self.edges[e,1]
else:
c = self.edges[e,0]
# print " next node in trace: ",c
# now we have a new edge e, and the next node in the ring c
if edges_used[e] == 0:
edges_used[e] = 1 # mark as tentatively used
else:
# we guessed wrong, and now we should just bail and flip the
# other way but it's not so slow just to keep going and figure it
# out later.
# print "Could be smarter and abort now."
pass
if len(ring) >= 2 and b==ring[0] and c == ring[1]:
#print " %d,%d == %d,%d we've come full circle. well done"%(b,c,
# ring[0],ring[1])
break
ring.append(c)
a,b = b,c
# remove that last one where we figured out that we were really all the way
# around.
ring = ring[:-1]
points = self.points[ring]
if bool(ccw) == bool(is_ccw(points)):
# print "great, got correctly oriented ring (ccw=%s)"%ccw
edges_used[ edges_used==1 ] = 2 # really used
rings.append( array(ring) )
break # breaks out of the flip loop
else:
# print "ring orientation wrong, wanted ccw=%s"%ccw
if flip:
area = signed_area(points)
if self.verbose > 1:
print "Failed to get positive area either way:"
print "Ring area is ",area
if isnan(area):
print "Got nan area:"
print points
raise Exception,"NaN area trying to figure out rings"
# raise Exception,"Failed to make positive area ring in either direction"
# I think this is actually valid - when Angel Island gets joined to
# Tiburon, if you start on Angel island either way you go you trace
# a region CCW.
# however, nodes that were visited in both directions
# should 'probably' be marked so we don't visit them more.
# really not sure how this will fair with a multiple-bowtie
# issue...
failed_edges_used2 = where(edges_used==1)[0]
edges_used[ intersect1d( failed_edges_used1,
failed_edges_used2 ) ] = 2
# otherwise try again going the other direction,
# unmark edges, but remember them
failed_edges_used1 = where(edges_used==1)[0]
edges_used[ edges_used==1 ] = 0 # back into the pool
if self.verbose > 0:
print "Done creating rings: %d rings in total"%len(rings)
return rings
def edges_to_polygons(self,edgemask):
""" use the edges (possibly masked by given edgemask) to create
a shapely.geometry.Polygon() for each top-level polygon, ordered
by decreasing area
"""
rings_and_holes = self.edges_to_rings_and_holes(edgemask)
polys = []
for r,inner_rings in rings_and_holes:
outer_points = self.points[r]
inner_points = [self.points[ir] for ir in inner_rings]
polys.append( geometry.Polygon( outer_points, inner_points) )
areas = array([p.area for p in polys])
order = argsort(-1 * areas)
return [polys[i] for i in order]
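    # Sketch of typical use (assumed workflow): select the marked boundary edges
    # and build shapely polygons for the domain, largest (outer) polygon first.
    #   boundary = g.edges[:,2] > 0          # marked edges only
    #   polys = g.edges_to_polygons(boundary)
    #   domain = polys[0]                    # largest-area polygon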
def edges_to_rings_and_holes(self,edgemask):
""" using only the edges for which edgemask is true,
construct polygons with holes. if edgemask is not given, use all of the
current edges
This calls edges_to_rings to get both ccw rings and cw rings, and
then determines which rings are inside which.
returns a list [ [ outer_ring_nodes, [inner_ring1,inner_ring2,...]], ... ]
"""
if edgemask is not None:
edges = self.edges[edgemask,:2]
masked_grid = TriGrid(points=self.points,edges=edges)
return masked_grid.edges_to_rings_and_holes(edgemask=None)
# print "calling edges_to_rings (ccw)"
ccw_rings = self.edges_to_rings(ccw=1)
# print "calling edges_to_rings (cw)"
cw_rings = self.edges_to_rings(ccw=0)
# print "constructing polygons"
# make single-ring polygons out of each:
ccw_polys = [geometry.Polygon(self.points[r]) for r in ccw_rings]
cw_polys = [geometry.Polygon(self.points[r]) for r in cw_rings]
# assume that the ccw poly with the largest area is the keeper.
# technically we should consider all ccw polys that are not inside
# any other poly
ccw_areas = [p.area for p in ccw_polys]
outer_rings = outermost_rings( ccw_polys )
# Then for each outer_ring, search for cw_polys that fit inside it.
outer_polys = [] # each outer polygon, followed by a list of its holes
# print "finding the nesting order of polygons"
for oi in outer_rings:
outer_poly = ccw_polys[oi]
# all cw_polys that are contained by this outer ring.
# This is where the predicate error is happening -
possible_children_i = []
for i in range(len(cw_polys)):
try:
if i!=oi and outer_poly.contains( cw_polys[i] ):
if not cw_polys[i].contains(outer_poly):
possible_children_i.append(i)
else:
print "Whoa - narrowly escaped disaster with a congruent CW poly"
except shapely.predicates.PredicateError:
print "Failed while comparing rings - try negative buffering"
d = sqrt(cw_polys[i].area)
inner_poly = cw_polys[i].buffer(-d*0.00001,4)
if outer_poly.contains( inner_poly ):
possible_children_i.append(i)
# the original list comprehension, but doesn't handle degenerate
# case
# possible_children_i = [i for i in range(len(cw_polys)) \
# if outer_poly.contains( cw_polys[i] ) and i!=oi ]
possible_children_poly = [cw_polys[i] for i in possible_children_i]
# of the possible children, only the ones that are inside another child are
# really ours. outermost_rings will return indices into possible_children, so remap
# those back to proper cw_poly indices to get children.
children = [possible_children_i[j] for j in outermost_rings( possible_children_poly )]
outer_polys.append( [ccw_rings[oi],
[cw_rings[i] for i in children]] )
return outer_polys
def select_edges_by_polygon(self,poly):
ecs=self.edge_centers()
return nonzero( [poly.contains( geometry.Point(ec)) for ec in ecs] )[0]
def trim_to_left(self, path):
""" Given a path, trim all cells to the left of it.
"""
# mark the cut edges:
for i in range(len(path)-1):
e = self.find_edge( path[i:i+2] )
if self.edges[e,2] == 0 or self.edges[e,2] == CUT_EDGE:
# record at least ones that is really cut, in case some of
# of the cut edges are actually on the boundary
cut_edge = (path[i],path[i+1],e)
self.edges[e,2] = CUT_EDGE
# choose the first cell, based on the last edge that was touched above:
# the actual points:
a = self.points[cut_edge[0]]
b = self.points[cut_edge[1]]
# the edge index
edge = cut_edge[2]
# the two cells that form this edge:
cell1,cell2 = self.edges[edge,3:]
other_point1 = setdiff1d( self.cells[cell1], cut_edge[:2] )[0]
other_point2 = setdiff1d( self.cells[cell2], cut_edge[:2] )[0]
parallel = (b-a)
# manually rotate 90deg CCW
bad = array([ -parallel[1],parallel[0]] )
if dot(self.points[other_point1],bad) > dot(self.points[other_point2],bad):
bad_cell = cell1
else:
bad_cell = cell2
print "Deleting"
self.recursive_delete(bad_cell)
print "Renumbering"
self.renumber()
def recursive_delete(self,c,renumber = 1):
del_count = 0
to_delete = [c]
        # things in the queue have not been processed at all...
while len(to_delete) > 0:
# grab somebody:
c = to_delete.pop()
if self.cells[c,0] == -1:
continue
# get their edges
nodea,nodeb,nodec = self.cells[c]
my_edges = [self.find_edge( (nodea,nodeb) ),
self.find_edge( (nodeb,nodec) ),
self.find_edge( (nodec,nodea) ) ]
# mark it deleted:
self.cells[c,0] = -1
del_count += 1
# add their neighbors to the queue to be processed:
for e in my_edges:
if self.edges[e,2] == 0:# only on non-cut, internal edges:
c1,c2 = self.edges[e,3:]
if c1 == c:
nbr = c2
else:
nbr = c1
if nbr >= 0:
to_delete.append(nbr)
print "Deleted %i cells"%del_count
# ## Experimental stitching - started to backport from trigrid2.py, not
# ## complete. See paver.py:splice_in_grid
# @staticmethod
# def stitch_grids(grids,use_envelope=False,envelope_tol=0.01,join_tolerance=0.25):
# """ grids: an iterable of TriGrid instances
# combines all grids together, removing duplicate points, joining coincident vertices
#
# use_envelope: use the rectangular bounding box of each grid to determine joinable
# nodes.
# envelope_tol: if using the grid bounds to determine joinable leaf nodes, the tolerance
# for determining that a node does lie on the boundary.
# join_tolerance: leaf nodes from adjacent grids within this distance range will be
# considered coincident, and joined.
#
# """
# # for each grid, an array of node indices which will be considered
# all_leaves = []
#
# accum_grid = None
#
# for i,gridB in enumerate(grids):
# if i % 100 == 0:
# print "%d / %d"%(i,len(grids)-1)
#
# if gridB.Npoints() == 0:
# print "empty"
# continue
#
# gridB.verbose = 0
# gridB.renumber()
#
# Bleaves = array(gridB.leaf_nodes(use_envelope=use_envelope,
# tolerance=envelope_tol,
# use_degree=False),
# int32)
#
# if i == 0:
# accum_grid = gridB
# if len(Bleaves):
# all_leaves.append( Bleaves )
# else:
# accum_grid.append_grid(gridB)
# if len(Bleaves):
# all_leaves.append( Bleaves + gridB.node_offset)
#
# all_leaves = concatenate(all_leaves)
#
# # build an index of the leaf nodes to speed up joining
# lf = field.XYZField(X=accum_grid.nodes['x'][all_leaves],
# F=all_leaves)
# lf.build_index()
#
# to_join=[] # [ (i,j), ...] , with i<j, and i,j indexes into accum_grid.nodes
# for i,l in enumerate(all_leaves):
# nbrs = lf.nearest(lf.X[i],count=4)
#
# for nbr in nbrs:
# if nbr <= i:
# continue
# dist = norm(lf.X[i] - lf.X[nbr])
# if dist < join_tolerance:
# print "Joining with distance ",dist
# to_join.append( (all_leaves[i], all_leaves[nbr]) )
#
# # okay - so need to allow for multiple joins with a single node.
# # done - but is the joining code going to handle that okay?
#
# _remapped = {} # for joined nodes, track who they became
# def canonicalize(n): # recursively resolve remapped nodes
# while _remapped.has_key(n):
# n = _remapped[n]
# return n
#
# for a,b in to_join:
# a = canonicalize(a)
# b = canonicalize(b)
# if a==b:
# continue
#
# a_nbrs = accum_grid.node_neighbors(a)
# accum_grid.delete_node(a)
# for a_nbr in a_nbrs:
# # with edges along a boundary, it's possible that
# # the new edge already exists
# try:
# accum_grid.nodes_to_edge( [a_nbr,b] )
# except NoSuchEdgeError:
# accum_grid.add_edge(a_nbr,b)
# return accum_grid
# Undo-history management - very generic.
op_stack_serial = 10
op_stack = None
def checkpoint(self):
if self.op_stack is None:
self.op_stack_serial += 1
self.op_stack = []
return self.op_stack_serial,len(self.op_stack)
def revert(self,cp):
serial,frame = cp
if serial != self.op_stack_serial:
raise ValueError,"The current op stack has serial %d, but your checkpoint is %s"%(self.op_stack_serial,
serial)
while len(self.op_stack) > frame:
self.pop_op()
def commit(self):
self.op_stack = None
self.op_stack_serial += 1
def push_op(self,meth,*data,**kwdata):
if self.op_stack is not None:
self.op_stack.append( (meth,data,kwdata) )
def pop_op(self):
f = self.op_stack.pop()
if self.verbose > 3:
print "popping: ",f
meth = f[0]
args = f[1]
kwargs = f[2]
meth(*args,**kwargs)
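    # Hedged example of the undo pattern: take a checkpoint, try a batch of
    # edits, then either commit or roll everything back if something fails.
    #   cp = g.checkpoint()
    #   try:
    #       g.add_edge(n1, n2)        # hypothetical edits
    #       g.delete_node(n3)
    #       g.commit()
    #   except Exception:
    #       g.revert(cp)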
###
def unmove_node(self,i,orig_val):
# update point index:
if self.index is not None:
curr_coords = self.points[i,xxyy]
orig_coords = orig_val[xxyy]
self.index.delete(i, curr_coords )
self.index.insert(i, orig_coords )
self.points[i] = orig_val
def move_node(self,i,new_pnt):
self.push_op(self.unmove_node,i,self.points[i].copy())
# update point index:
if self.index is not None:
old_coords = self.points[i,xxyy]
new_coords = new_pnt[xxyy]
self.index.delete(i, old_coords )
self.index.insert(i, new_coords )
self.points[i] = new_pnt
self.updated_node(i)
for e in self.pnt2edges(i):
new_ec = self.points[self.edges[e,:2]].mean(axis=0)
if self._edge_centers is not None:
old_ec = self._edge_centers[e]
self._edge_centers[e] = new_ec
if self.edge_index is not None:
self.edge_index.delete(e,old_ec[xxyy])
self.edge_index.insert(e,new_ec[xxyy])
self.updated_edge(e)
def updated_node(self,i):
for cb in self._update_node_listeners.values():
cb(i)
def updated_edge(self,e):
for cb in self._update_edge_listeners.values():
cb(e)
def updated_cell(self,c):
for cb in self._update_cell_listeners.values():
cb(c)
def created_node(self,i):
for cb in self._create_node_listeners.values():
cb(i)
def created_edge(self,e):
# fix up the edge index
ec = self.points[self.edges[e,:2]].mean(axis=0)
if self._edge_centers is not None:
if e != len(self._edge_centers):
# ideally should know where this is getting out of sync and
# fix it there.
print "Edge centers is out of sync. clearing it."
self._edge_centers = None
self.edge_index = None
else:
self._edge_centers = array_append(self._edge_centers,ec)
if self.edge_index is not None:
print "edge_index: inserting new edge center %i %s"%(e,ec)
self.edge_index.insert( e, ec[xxyy] )
for cb in self._create_edge_listeners.values():
cb(e)
def created_cell(self,c):
for cb in self._create_cell_listeners.values():
cb(c)
def deleted_cell(self,c):
for cb in self._delete_cell_listeners.values():
cb(c)
def deleted_node(self,i):
for cb in self._delete_node_listeners.values():
cb(i)
def deleted_edge(self,e):
for cb in self._delete_edge_listeners.values():
cb(e)
# subscriber interface for updates:
listener_count = 0
def init_listeners(self):
self._update_node_listeners = {}
self._update_edge_listeners = {}
self._update_cell_listeners = {}
self._create_node_listeners = {}
self._create_edge_listeners = {}
self._create_cell_listeners = {}
self._delete_node_listeners = {}
self._delete_edge_listeners = {}
self._delete_cell_listeners = {}
def listen(self,event,cb):
cb_id = self.listener_count
if event == 'update_node':
self._update_node_listeners[cb_id] = cb
elif event == 'update_edge':
self._update_edge_listeners[cb_id] = cb
elif event == 'update_cell':
self._update_cell_listeners[cb_id] = cb
elif event == 'create_node':
self._create_node_listeners[cb_id] = cb
        elif event == 'create_edge':
            self._create_edge_listeners[cb_id] = cb
        elif event == 'create_cell':
            self._create_cell_listeners[cb_id] = cb
        elif event == 'delete_node':
self._delete_node_listeners[cb_id] = cb
elif event == 'delete_edge':
self._delete_edge_listeners[cb_id] = cb
elif event == 'delete_cell':
self._delete_cell_listeners[cb_id] = cb
else:
raise Exception,"unknown event %s"%event
self.listener_count += 1
return cb_id
def unlisten(self,cb_id):
for l in [ self._update_node_listeners,
self._update_edge_listeners,
self._update_cell_listeners,
self._create_node_listeners,
self._create_edge_listeners,
self._create_cell_listeners,
self._delete_node_listeners,
self._delete_edge_listeners,
self._delete_cell_listeners]:
if l.has_key(cb_id):
del l[cb_id]
return
print "Failed to remove cb_id %d"%cb_id
# g = TriGrid(sms_fname="../suntans/main/testcases/sms-grid/fort.grd")
# # g.plot(voronoi=False)
# # show()
# g.write_suntans("../suntans/main/testcases/sms-grid")
#
# g2 = TriGrid(suntans_path="../suntans/main/testcases/sms-grid")
# g2.plot()
# show()
# g = TriGrid(suntans_path="../suntans/main/testcases/sanfran_lowres_1000_trial/data")
# scalar = rand(g.Ncells())
# g.plot_scalar(scalar)
# g.plot(voronoi=False)
# show()
if __name__ == '__main__':
g = TriGrid(sms_fname="/home/rusty/data/sfbay/grids/100km-arc/250m/250m-100km_arc.grd")
g.plot()
g.verify_bc(do_plot=1)
|
rustychris/stomel
|
src/trigrid.py
|
Python
|
gpl-2.0
| 113,063
|
[
"Bowtie",
"VisIt"
] |
6a3b442a32af4bdd3d01fa5eeadde3e7ffc24c6dccc226f2115399f2497ee879
|
#!/usr/bin/env python3
# Convenience tool to get the JIRA tokens in place.
# This helps with the initial setup when connecting the bot for the first time.
# This is example code from Atlassian - https://bitbucket.org/atlassianlabs/atlassian-oauth-examples/
# The modified version for Python requests was copied from this fork:
# https://bitbucket.org/MicahCarrick/atlassian-oauth-examples/src/68d005311b9b74d6a85787bb87ccc948766486d3/python-requests/example.py?at=default&fileviewer=file-view-default
from oauthlib.oauth1 import SIGNATURE_RSA # type: ignore
from requests_oauthlib import OAuth1Session # type: ignore
from jira.client import JIRA # type: ignore
def read(file_path: str) -> str:
""" Read a file and return it's contents. """
with open(file_path) as f:
return f.read()
# The Consumer Key created while setting up the "Incoming Authentication" in
# JIRA for the Application Link.
CONSUMER_KEY = 'jira-gerrit-oauth'
# The contents of the rsa.pem file generated (the private RSA key)
RSA_KEY = read('jiracloser.pem')
# The URLs for the JIRA instance
JIRA_SERVER = 'https://bugreports-test.qt.io'
REQUEST_TOKEN_URL = JIRA_SERVER + '/plugins/servlet/oauth/request-token'
AUTHORIZE_URL = JIRA_SERVER + '/plugins/servlet/oauth/authorize'
ACCESS_TOKEN_URL = JIRA_SERVER + '/plugins/servlet/oauth/access-token'
# Step 1: Get a request token
oauth = OAuth1Session(CONSUMER_KEY, signature_type='auth_header',
signature_method=SIGNATURE_RSA, rsa_key=RSA_KEY)
request_token = oauth.fetch_request_token(REQUEST_TOKEN_URL)
print("STEP 1: GET REQUEST TOKEN")
print(" oauth_token={}".format(request_token['oauth_token']))
print(" oauth_token_secret={}".format(request_token['oauth_token_secret']))
print("\n")
# Step 2: Get the end-user's authorization
print("STEP2: AUTHORIZATION")
print(" Visit to the following URL to provide authorization:")
print(" {}?oauth_token={}".format(AUTHORIZE_URL, request_token['oauth_token']))
print("\n")
while input("Press any key to continue..."):
pass
# Step 3: Get the access token
access_token = oauth.fetch_access_token(ACCESS_TOKEN_URL, verifier="some_verifier")
print("STEP2: GET ACCESS TOKEN")
print(" oauth_token={}".format(access_token['oauth_token']))
print(" oauth_token_secret={}".format(access_token['oauth_token_secret']))
print("\n")
# Now you can use the access tokens with the JIRA client. Hooray!
jira = JIRA(options={'server': JIRA_SERVER}, oauth={
'access_token': access_token['oauth_token'],
'access_token_secret': access_token['oauth_token_secret'],
'consumer_key': CONSUMER_KEY,
'key_cert': RSA_KEY
})
# print all of the project keys just as an example
print("Verifying that the access works, listing JIRA projects:")
for project in jira.projects():
print(project.key)
|
qtproject/qtqa
|
scripts/jira/jira-bug-closer/oauth_dance.py
|
Python
|
gpl-3.0
| 2,820
|
[
"VisIt"
] |
b4cb0b163e6ab07fb1d9f5759988e1e0ca620eb48c2905903ccc36b7d4514190
|
# -*- coding: utf-8 -*-
r"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy:astropy-coordinates-design` and
the docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example,
we will define a coordinate system defined by the plane of orbit of the
Sagittarius Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003).
The Sgr coordinate system is often referred to in terms of two angular
coordinates, :math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
https://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : `~astropy.units.Quantity`, optional, keyword-only
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs')
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian, frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr,
frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(
fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]")
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(
fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]")
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(
fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]")
plt.show()
|
saimn/astropy
|
examples/coordinates/plot_sgr-coordinate-frame.py
|
Python
|
bsd-3-clause
| 10,586
|
[
"Galaxy"
] |
547790e0143fe9a8b1996e4cecd461eb313bd82630ddb4630845753fd1343eef
|
# -*- coding: utf-8 -*-
'''
Provide authentication using YubiKey.
.. versionadded:: 2015.5.0
:depends: yubico-client Python module
To get your YubiKey API key you will need to visit the website below.
https://upgrade.yubico.com/getapikey/
The resulting page will show the generated Client ID (aka AuthID or API ID)
and the generated API key (Secret Key). Make a note of both and use these
two values in your /etc/salt/master configuration.
/etc/salt/master
.. code-block:: yaml
yubico_users:
damian:
id: 12345
key: ABCDEFGHIJKLMNOPQRSTUVWXYZ
.. code-block:: yaml
external_auth:
yubico:
damian:
- test.*
Please wait five to ten minutes after generating the key before testing so that
the API key will be updated on all the YubiCloud servers.
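Once the master is configured, authentication can be exercised from the Salt
CLI; a minimal illustrative invocation (the ``'*'`` target and the
``test.ping`` function are only examples matching the ACL above) is:
.. code-block:: bash
    salt -a yubico '*' test.ping
Salt will then prompt for the username and the YubiKey one-time password.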
'''
# Import Python Libs
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
try:
from yubico_client import Yubico, yubico_exceptions
HAS_YUBICO = True
except ImportError:
HAS_YUBICO = False
def __get_yubico_users(username):
'''
Grab the YubiKey Client ID & Secret Key
'''
user = {}
try:
if __opts__['yubico_users'].get(username, None):
(user['id'], user['key']) = list(__opts__['yubico_users'][username].values())
else:
return None
except KeyError:
return None
return user
def auth(username, password):
'''
    Authenticate against the Yubico server
'''
    _cred = __get_yubico_users(username)
    if _cred is None:
        log.info('Username {0} not found in yubico_users'.format(username))
        return False
    client = Yubico(_cred['id'], _cred['key'])
try:
if client.verify(password):
return True
else:
return False
except yubico_exceptions.StatusCodeError as e:
log.info('Unable to verify YubiKey `{0}`'.format(e))
return False
if __name__ == '__main__':
__opts__ = {'yubico_users': {'damian': {'id': '12345', 'key': 'ABC123'}}}
if auth('damian', 'OPT'):
print("Authenticated")
else:
print("Failed to authenticate")
|
smallyear/linuxLearn
|
salt/salt/auth/yubico.py
|
Python
|
apache-2.0
| 2,095
|
[
"VisIt"
] |
ed3319c970e8dfc6c52cf51d0f5fde1dec813466641df6fd51e07c2f659879e2
|
from ase import *
from ase.calculators import TestPotential
np.seterr(all='raise')
a = Atoms('4N',
positions=[(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(0.1, 0.2, 0.7)],
calculator=TestPotential())
print a.get_forces()
md = VelocityVerlet(a, dt=0.005)
def f():
print a.get_potential_energy(), a.get_total_energy()
md.attach(f, 500)
traj = PickleTrajectory('4N.traj', 'w', a)
md.attach(traj.write, 100)
print md.observers
md.run(steps=10000)
qn = QuasiNewton(a)
qn.attach(traj.write)
qn.run()
|
freephys/python_ase
|
ase/test/verlet.py
|
Python
|
gpl-3.0
| 571
|
[
"ASE"
] |
2b1ca9d336577c85b0d2003ddb50af387397079bf666d993e9601dca2936bb47
|
import logging
import os
import re
import socket
import sys
import time
from optparse import make_option
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.core.exceptions import ImproperlyConfigured
from django_extensions.management.technical_response import null_technical_500_response
from django_extensions.management.utils import RedirectHandler, setup_logger, signalcommand, has_ipdb
try:
if 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = True
elif 'staticfiles' in settings.INSTALLED_APPS:
from staticfiles.handlers import StaticFilesHandler # noqa
USE_STATICFILES = True
else:
USE_STATICFILES = False
except ImportError:
USE_STATICFILES = False
try:
from django.db.migrations.executor import MigrationExecutor
HAS_MIGRATIONS = True
except ImportError:
HAS_MIGRATIONS = False
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
DEFAULT_POLLER_RELOADER_INTERVAL = getattr(settings, 'RUNSERVERPLUS_POLLER_RELOADER_INTERVAL', 1)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
if django.VERSION >= (1, 8):
def add_arguments(self, parser):
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use a IPv6 address.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
parser.add_argument('--browser', action='store_true', dest='open_browser',
help='Tells Django to open a browser.')
parser.add_argument('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.')
parser.add_argument('--nothreading', action='store_false', dest='threaded',
help='Do not run in multithreaded mode.')
parser.add_argument('--threaded', action='store_true', dest='threaded',
help='Run in multithreaded mode.')
parser.add_argument('--output', dest='output_file', default=None,
help='Specifies an output file to send a copy of all messages (not flushed immediately).')
parser.add_argument('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed")
parser.add_argument('--cert', dest='cert_path', action="store", type=str,
help='To use SSL, specify certificate path.')
parser.add_argument('--extra-file', dest='extra_files', action="append", type=str,
help='auto-reload whenever the given file changes too (can be specified multiple times)')
parser.add_argument('--reloader-interval', dest='reloader_interval', action="store", type=int, default=DEFAULT_POLLER_RELOADER_INTERVAL,
help='After how many seconds auto-reload should scan for updates in poller-mode [default=%s]' % DEFAULT_POLLER_RELOADER_INTERVAL)
parser.add_argument('--pdb', action='store_true', dest='pdb', default=False,
help='Drop into pdb shell at the start of any view.')
parser.add_argument('--ipdb', action='store_true', dest='ipdb', default=False,
help='Drop into ipdb shell at the start of any view.')
parser.add_argument('--pm', action='store_true', dest='pm', default=False,
help='Drop into (i)pdb shell if an exception is raised in a view.')
parser.add_argument('--startup-messages', dest='startup_messages', action="store", default='reload',
help='When to show startup messages: reload [default], once, always, never.')
if USE_STATICFILES:
parser.add_argument('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.')
parser.add_argument('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.')
else:
option_list = BaseCommand.option_list + (
make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use a IPv6 address.'),
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--browser', action='store_true', dest='open_browser',
help='Tells Django to open a browser.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
make_option('--nothreading', action='store_false', dest='threaded',
help='Do not run in multithreaded mode.'),
make_option('--threaded', action='store_true', dest='threaded',
help='Run in multithreaded mode.'),
make_option('--output', dest='output_file', default=None,
help='Specifies an output file to send a copy of all messages (not flushed immediately).'),
make_option('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed"),
make_option('--cert', dest='cert_path', action="store", type="string",
help='To use SSL, specify certificate path.'),
make_option('--extra-file', dest='extra_files', action="append", type="string",
help='auto-reload whenever the given file changes too (can be specified multiple times)'),
make_option('--reloader-interval', dest='reloader_interval', action="store", type="int", default=DEFAULT_POLLER_RELOADER_INTERVAL,
help='After how many seconds auto-reload should scan for updates in poller-mode [default=%s]' % DEFAULT_POLLER_RELOADER_INTERVAL),
make_option('--pdb', action='store_true', dest='pdb', default=False,
help='Drop into pdb shell at the start of any view.'),
make_option('--ipdb', action='store_true', dest='ipdb', default=False,
help='Drop into ipdb shell at the start of any view.'),
make_option('--pm', action='store_true', dest='pm', default=False,
help='Drop into (i)pdb shell if an exception is raised in a view.'),
make_option('--startup-messages', dest='startup_messages', action="store", default='reload',
help='When to show startup messages: reload [default], once, always, never.')
)
if USE_STATICFILES:
option_list += (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
@signalcommand
def handle(self, addrport='', *args, **options):
import django
startup_messages = options.get('startup_messages', 'reload')
if startup_messages == "reload":
self.show_startup_messages = os.environ.get('RUNSERVER_PLUS_SHOW_MESSAGES')
elif startup_messages == "once":
self.show_startup_messages = not os.environ.get('RUNSERVER_PLUS_SHOW_MESSAGES')
elif startup_messages == "never":
self.show_startup_messages = False
else:
self.show_startup_messages = True
os.environ['RUNSERVER_PLUS_SHOW_MESSAGES'] = '1'
# Do not use default ending='\n', because StreamHandler() takes care of it
if hasattr(self.stderr, 'ending'):
self.stderr.ending = None
setup_logger(logger, self.stderr, filename=options.get('output_file', None)) # , fmt="[%(name)s] %(message)s")
logredirect = RedirectHandler(__name__)
# Redirect werkzeug log items
werklogger = logging.getLogger('werkzeug')
werklogger.setLevel(logging.INFO)
werklogger.addHandler(logredirect)
werklogger.propagate = False
if options.get("print_sql", False):
try:
# Django 1.7 onwards
from django.db.backends import utils
except ImportError:
# Django 1.6 below
from django.db.backends import util as utils
try:
import sqlparse
except ImportError:
sqlparse = None # noqa
class PrintQueryWrapper(utils.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
execution_time = time.time() - starttime
therest = ' -- [Execution time: %.6fs] [Database: %s]' % (execution_time, self.db.alias)
if sqlparse:
logger.info(sqlparse.format(raw_sql, reindent=True) + therest)
else:
logger.info(raw_sql + therest)
utils.CursorDebugWrapper = PrintQueryWrapper
try:
from django.core.servers.basehttp import AdminMediaHandler
USE_ADMINMEDIAHANDLER = True
except ImportError:
USE_ADMINMEDIAHANDLER = False
try:
from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler
except ImportError:
from django.core.handlers.wsgi import WSGIHandler # noqa
try:
from werkzeug import run_simple, DebuggedApplication
# Set colored output
if settings.DEBUG:
try:
set_werkzeug_log_color()
except: # We are dealing with some internals, anything could go wrong
if self.show_startup_messages:
print("Wrapping internal werkzeug logger for color highlighting has failed!")
pass
except ImportError:
raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")
pdb_option = options.get('pdb', False)
ipdb_option = options.get('ipdb', False)
pm = options.get('pm', False)
try:
from django_pdb.middleware import PdbMiddleware
except ImportError:
if pdb_option or ipdb_option or pm:
raise CommandError("django-pdb is required for --pdb, --ipdb and --pm options. Please visit https://pypi.python.org/pypi/django-pdb or install via pip. (pip install django-pdb)")
pm = False
else:
# Add pdb middleware if --pdb is specified or if in DEBUG mode
middleware = 'django_pdb.middleware.PdbMiddleware'
if ((pdb_option or ipdb_option or settings.DEBUG) and middleware not in settings.MIDDLEWARE_CLASSES):
settings.MIDDLEWARE_CLASSES += (middleware,)
# If --pdb is specified then always break at the start of views.
# Otherwise break only if a 'pdb' query parameter is set in the url
if pdb_option:
PdbMiddleware.always_break = 'pdb'
elif ipdb_option:
PdbMiddleware.always_break = 'ipdb'
def postmortem(request, exc_type, exc_value, tb):
if has_ipdb():
import ipdb
p = ipdb
else:
import pdb
p = pdb
            print >>sys.stderr, "Exception occurred: %s, %s" % (exc_type,
                                                                 exc_value)
p.post_mortem(tb)
# usurp django's handler
from django.views import debug
debug.technical_500_response = postmortem if pm else null_technical_500_response
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not addrport:
try:
addrport = settings.RUNSERVERPLUS_SERVER_ADDRESS_PORT
except AttributeError:
pass
if not addrport:
self.addr = ''
self.port = DEFAULT_PORT
else:
m = re.match(naiveip_re, addrport)
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % addrport)
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." %
self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.'
% self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
threaded = options.get('threaded', True)
use_reloader = options.get('use_reloader', True)
open_browser = options.get('open_browser', False)
cert_path = options.get("cert_path")
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
bind_url = "http://%s:%s/" % (
self.addr if not self._raw_ipv6 else '[%s]' % self.addr, self.port)
extra_files = options.get('extra_files', None) or []
reloader_interval = options.get('reloader_interval', 1)
def inner_run():
if self.show_startup_messages:
print("Performing system checks...\n")
if hasattr(self, 'check'):
self.check(display_num_errors=self.show_startup_messages)
else:
self.validate(display_num_errors=self.show_startup_messages)
if HAS_MIGRATIONS:
try:
self.check_migrations()
except ImproperlyConfigured:
pass
if self.show_startup_messages:
print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
print("Development server is running at %s" % (bind_url,))
print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
print("Quit the server with %s." % quit_command)
path = options.get('admin_media_path', '')
if not path:
admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
if os.path.isdir(admin_media_path):
path = admin_media_path
else:
path = os.path.join(django.__path__[0], 'contrib/admin/media')
handler = WSGIHandler()
if USE_ADMINMEDIAHANDLER:
handler = AdminMediaHandler(handler, path)
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
if open_browser:
import webbrowser
webbrowser.open(bind_url)
if cert_path:
"""
OpenSSL is needed for SSL support.
This will make flakes8 throw warning since OpenSSL is not used
directly, alas, this is the only way to show meaningful error
messages. See:
http://lucumr.pocoo.org/2011/9/21/python-import-blackbox/
for more information on python imports.
"""
try:
import OpenSSL # NOQA
except ImportError:
raise CommandError("Python OpenSSL Library is "
"required to use runserver_plus with ssl support. "
"Install via pip (pip install pyOpenSSL).")
dir_path, cert_file = os.path.split(cert_path)
if not dir_path:
dir_path = os.getcwd()
root, ext = os.path.splitext(cert_file)
certfile = os.path.join(dir_path, root + ".crt")
keyfile = os.path.join(dir_path, root + ".key")
try:
from werkzeug.serving import make_ssl_devcert
if os.path.exists(certfile) and \
os.path.exists(keyfile):
ssl_context = (certfile, keyfile)
else: # Create cert, key files ourselves.
ssl_context = make_ssl_devcert(
os.path.join(dir_path, root), host='localhost')
except ImportError:
if self.show_startup_messages:
print("Werkzeug version is less than 0.9, trying adhoc certificate.")
ssl_context = "adhoc"
else:
ssl_context = None
if use_reloader and settings.USE_I18N:
try:
from django.utils.autoreload import gen_filenames
except ImportError:
pass
else:
extra_files.extend(filter(lambda filename: filename.endswith('.mo'), gen_filenames()))
run_simple(
self.addr,
int(self.port),
DebuggedApplication(handler, True),
use_reloader=use_reloader,
use_debugger=True,
extra_files=extra_files,
reloader_interval=reloader_interval,
threaded=threaded,
ssl_context=ssl_context,
)
inner_run()
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan and self.show_startup_messages:
self.stdout.write(self.style.NOTICE("\nYou have unapplied migrations; your app may not work properly until they are applied."))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
def set_werkzeug_log_color():
"""Try to set color to the werkzeug log.
"""
from django.core.management.color import color_style
from werkzeug.serving import WSGIRequestHandler
from werkzeug._internal import _log
_style = color_style()
_orig_log = WSGIRequestHandler.log
def werk_log(self, type, message, *args):
try:
msg = '%s - - [%s] %s' % (
self.address_string(),
self.log_date_time_string(),
message % args,
)
http_code = str(args[1])
except:
return _orig_log(type, message, *args)
# Utilize terminal colors, if available
if http_code[0] == '2':
# Put 2XX first, since it should be the common case
msg = _style.HTTP_SUCCESS(msg)
elif http_code[0] == '1':
msg = _style.HTTP_INFO(msg)
elif http_code == '304':
msg = _style.HTTP_NOT_MODIFIED(msg)
elif http_code[0] == '3':
msg = _style.HTTP_REDIRECT(msg)
elif http_code == '404':
msg = _style.HTTP_NOT_FOUND(msg)
elif http_code[0] == '4':
msg = _style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = _style.HTTP_SERVER_ERROR(msg)
_log(type, msg)
WSGIRequestHandler.log = werk_log
|
dpetzold/django-extensions
|
django_extensions/management/commands/runserver_plus.py
|
Python
|
mit
| 21,800
|
[
"VisIt"
] |
23dfcae84d70060fd9cea56ec650ad32c12c19a8568b6a85350830686297d66f
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
from django.core.cache import cache
from django.core.urlresolvers import reverse
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
WorkoutManagerTestCase,
WorkoutManagerDeleteTestCase,
WorkoutManagerEditTestCase,
WorkoutManagerAddTestCase
)
from wger.exercises.models import Equipment, Exercise
from wger.utils.cache import get_template_cache_name
from wger.utils.constants import PAGINATION_OBJECTS_PER_PAGE
class EquipmentRepresentationTestCase(WorkoutManagerTestCase):
'''
Test the representation of a model
'''
def test_representation(self):
'''
Test that the representation of an object is correct
'''
self.assertEqual("{0}".format(Equipment.objects.get(pk=1)), 'Dumbbells')
class EquipmentShareButtonTestCase(WorkoutManagerTestCase):
'''
Test that the share button is correctly displayed and hidden
'''
def test_share_button(self):
url = reverse('exercise:equipment:overview')
response = self.client.get(url)
self.assertTrue(response.context['show_shariff'])
self.user_login('admin')
response = self.client.get(url)
self.assertTrue(response.context['show_shariff'])
self.user_login('test')
response = self.client.get(url)
self.assertTrue(response.context['show_shariff'])
class AddEquipmentTestCase(WorkoutManagerAddTestCase):
'''
Tests adding a new equipment
'''
object_class = Equipment
url = 'exercise:equipment:add'
data = {'name': 'A new equipment'}
class DeleteEquipmentTestCase(WorkoutManagerDeleteTestCase):
'''
Tests deleting an equipment
'''
object_class = Equipment
url = 'exercise:equipment:delete'
pk = 1
class EditEquipmentTestCase(WorkoutManagerEditTestCase):
'''
Tests editing an equipment
'''
object_class = Equipment
url = 'exercise:equipment:edit'
pk = 1
data = {'name': 'A new name'}
class EquipmentListTestCase(WorkoutManagerTestCase):
'''
Tests the equipment list page (admin view)
'''
def test_overview(self):
# Add more equipments so we can test the pagination
self.user_login('admin')
data = {"name": "A new entry"}
for i in range(0, 50):
self.client.post(reverse('exercise:equipment:add'), data)
# Page exists and the pagination works
response = self.client.get(reverse('exercise:equipment:list'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['equipment_list']), PAGINATION_OBJECTS_PER_PAGE)
response = self.client.get(reverse('exercise:equipment:list'), {'page': 2})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['equipment_list']), PAGINATION_OBJECTS_PER_PAGE)
response = self.client.get(reverse('exercise:equipment:list'), {'page': 3})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['equipment_list']), 3)
# 'last' is a special case
response = self.client.get(reverse('exercise:equipment:list'), {'page': 'last'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['equipment_list']), 3)
# Page does not exist
response = self.client.get(reverse('exercise:equipment:list'), {'page': 100})
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('exercise:equipment:list'), {'page': 'foobar'})
self.assertEqual(response.status_code, 404)
class EquipmentCacheTestCase(WorkoutManagerTestCase):
'''
Equipment cache test case
'''
def test_equipment_overview(self):
'''
Test the equipment overview cache is correctly generated on visit
'''
if self.is_mobile:
self.client.get(reverse('exercise:equipment:overview'))
else:
self.assertFalse(cache.get(get_template_cache_name('equipment-overview', 2)))
self.client.get(reverse('exercise:equipment:overview'))
self.assertTrue(cache.get(get_template_cache_name('equipment-overview', 2)))
    def test_equipment_cache_update(self):
        '''
        Test that the template cache for the overview is correctly reset when
        performing certain operations
'''
self.assertFalse(cache.get(get_template_cache_name('equipment-overview', 2)))
self.client.get(reverse('exercise:equipment:overview'))
self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
old_overview = cache.get(get_template_cache_name('equipment-overview', 2))
exercise = Exercise.objects.get(pk=2)
exercise.name = 'Very cool exercise 2'
exercise.description = 'New description'
exercise.equipment.add(Equipment.objects.get(pk=2))
exercise.save()
self.assertFalse(cache.get(get_template_cache_name('equipment-overview', 2)))
self.client.get(reverse('exercise:equipment:overview'))
self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
new_overview = cache.get(get_template_cache_name('equipment-overview', 2))
self.assertNotEqual(old_overview, new_overview)
class EquipmentApiTestCase(api_base_test.ApiBaseResourceTestCase):
'''
Tests the equipment overview resource
'''
pk = 1
resource = Equipment
private_resource = False
|
DeveloperMal/wger
|
wger/exercises/tests/test_equipment.py
|
Python
|
agpl-3.0
| 6,236
|
[
"VisIt"
] |
f2c6514281ec2802857fd04b661368659173297736759841049d4303ac978c90
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
MP2 analytical nuclear gradients.
'''
from pyscf import gto, scf, mp
mol = gto.M(
atom = [
['O' , 0. , 0. , 0],
['H' , 0. , -0.757 , 0.587],
['H' , 0. , 0.757 , 0.587]],
basis = 'ccpvdz')
mf = scf.RHF(mol).run()
postmf = mp.MP2(mf).run()
g = postmf.nuc_grad_method()
g.kernel()
mf = scf.UHF(mol).x2c().run()
postmf = mp.MP2(mf).run()
# PySCF-1.6.1 and newer supports the .Gradients method to create a grad
# object after grad module was imported. It is equivalent to call the
# .nuc_grad_method method.
from pyscf import grad
g = postmf.Gradients()
g.kernel()
|
gkc1000/pyscf
|
examples/grad/03-mp2_grad.py
|
Python
|
apache-2.0
| 676
|
[
"PySCF"
] |
ada5c5aaf0e2036a2503e50651359b6617b11881c4ac55aa0bf63fad25081b95
|
'''
MFEM example 23
See c++ version in the MFEM library for more detail
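A typical invocation from the examples directory (the option values below are
only illustrative; see the argument parser at the bottom of this file for the
full set of options):
    python ex23.py -m star.mesh -r 2 -o 2 -tf 0.5 -dt 1e-2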
'''
import os
import io
import mfem.ser as mfem
from mfem.ser import intArray
from os.path import expanduser, join, dirname
import numpy as np
from numpy import sin, cos, exp, sqrt, pi, abs, array, floor, log, sum
def run(mesh_file="",
ref_levels=2,
order=2,
ode_solver_type=10,
t_final=0.5,
dt=1e-2,
speed=1.0,
dirichlet=True,
visit=True,
visualization=True,
vis_steps=5):
class WaveOperator(mfem.SecondOrderTimeDependentOperator):
def __init__(self, fespace, ess_bdr, speed):
mfem.SecondOrderTimeDependentOperator.__init__(
self, fespace.GetTrueVSize(), 0.0)
self.ess_tdof_list = mfem.intArray()
rel_tol = 1e-8
fespace.GetEssentialTrueDofs(ess_bdr, self.ess_tdof_list)
c2 = mfem.ConstantCoefficient(speed*speed)
K = mfem.BilinearForm(fespace)
K.AddDomainIntegrator(mfem.DiffusionIntegrator(c2))
K.Assemble()
self.Kmat0 = mfem.SparseMatrix()
self.Kmat = mfem.SparseMatrix()
dummy = mfem.intArray()
K.FormSystemMatrix(dummy, self.Kmat0)
K.FormSystemMatrix(self.ess_tdof_list, self.Kmat)
self.K = K
self.Mmat = mfem.SparseMatrix()
M = mfem.BilinearForm(fespace)
M.AddDomainIntegrator(mfem.MassIntegrator())
M.Assemble()
M.FormSystemMatrix(self.ess_tdof_list, self.Mmat)
self.M = M
M_solver = mfem.CGSolver()
M_prec = mfem.DSmoother()
M_solver.iterative_mode = False
M_solver.SetRelTol(rel_tol)
M_solver.SetAbsTol(0.0)
M_solver.SetMaxIter(30)
M_solver.SetPrintLevel(0)
M_solver.SetPreconditioner(M_prec)
M_solver.SetOperator(self.Mmat)
self.M_prec = M_prec
self.M_solver = M_solver
T_solver = mfem.CGSolver()
T_prec = mfem.DSmoother()
T_solver.iterative_mode = False
T_solver.SetRelTol(rel_tol)
T_solver.SetAbsTol(0.0)
T_solver.SetMaxIter(100)
T_solver.SetPrintLevel(0)
T_solver.SetPreconditioner(T_prec)
self.T_prec = T_prec
self.T_solver = T_solver
self.T = None
def Mult(self, u, du_dt, d2udt2):
# Compute:
# d2udt2 = M^{-1}*-K(u)
# for d2udt2
z = mfem.Vector(u.Size())
self.Kmat.Mult(u, z)
z.Neg() # z = -z
self.M_solver.Mult(z, d2udt2)
def ImplicitSolve(self, fac0, fac1, u, dudt, d2udt2):
# Solve the equation:
# d2udt2 = M^{-1}*[-K(u + fac0*d2udt2)]
# for d2udt2
if self.T is None:
self.T = mfem.Add(1.0, self.Mmat, fac0, self.Kmat)
self.T_solver.SetOperator(self.T)
z = mfem.Vector(u.Size())
self.Kmat0.Mult(u, z)
z.Neg()
# iterate over Array<int> :D
for j in self.ess_tdof_list:
z[j] = 0.0
self.T_solver.Mult(z, d2udt2)
def SetParameters(self, u):
self.T = None
mesh = mfem.Mesh(mesh_file, 1, 1)
dim = mesh.Dimension()
# 3. Define the ODE solver used for time integration. Several second order
# time integrators are available.
if ode_solver_type <= 10:
ode_solver = mfem.GeneralizedAlpha2Solver(ode_solver_type/10)
elif ode_solver_type == 11:
ode_solver = mfem.AverageAccelerationSolver()
elif ode_solver_type == 12:
ode_solver = mfem.LinearAccelerationSolver()
elif ode_solver_type == 13:
ode_solver = mfem.CentralDifferenceSolver()
elif ode_solver_type == 14:
ode_solver = mfem.FoxGoodwinSolver()
else:
print("Unknown ODE solver type: " + str(ode_solver_type))
# 4. Refine the mesh to increase the resolution. In this example we do
# 'ref_levels' of uniform refinement, where 'ref_levels' is a
# command-line parameter.
for lev in range(ref_levels):
mesh.UniformRefinement()
# 5. Define the vector finite element space representing the current and the
# initial temperature, u_ref.
fe_coll = mfem.H1_FECollection(order, dim)
fespace = mfem.FiniteElementSpace(mesh, fe_coll)
fe_size = fespace.GetTrueVSize()
    print("Number of unknowns: " + str(fe_size))
u_gf = mfem.GridFunction(fespace)
dudt_gf = mfem.GridFunction(fespace)
# 6. Set the initial conditions for u. All boundaries are considered
# natural.
class cInitialSolution(mfem.PyCoefficient):
def EvalValue(self, x):
norm2 = sum(x**2)
return exp(-norm2*30)
class cInitialRate(mfem.PyCoefficient):
def EvalValue(self, x):
return 0
u_0 = cInitialSolution()
dudt_0 = cInitialRate()
u_gf.ProjectCoefficient(u_0)
u = mfem.Vector()
u_gf.GetTrueDofs(u)
dudt_gf.ProjectCoefficient(dudt_0)
dudt = mfem.Vector()
dudt_gf.GetTrueDofs(dudt)
# 7. Initialize the conduction operator and the visualization.
ess_bdr = mfem.intArray()
if mesh.bdr_attributes.Size():
ess_bdr.SetSize(mesh.bdr_attributes.Max())
if (dirichlet):
ess_bdr.Assign(1)
else:
            ess_bdr.Assign(0)
oper = WaveOperator(fespace, ess_bdr, speed)
u_gf.SetFromTrueDofs(u)
mesh.Print("ex23.mesh", 8)
output = io.StringIO()
output.precision = 8
u_gf.Save(output)
dudt_gf.Save(output)
fid = open("ex23-init.gf", 'w')
fid.write(output.getvalue())
fid.close()
if visit:
visit_dc = mfem.VisItDataCollection("Example23", mesh)
visit_dc.RegisterField("solution", u_gf)
visit_dc.RegisterField("rate", dudt_gf)
visit_dc.SetCycle(0)
visit_dc.SetTime(0.0)
visit_dc.Save()
if visualization:
sout = mfem.socketstream("localhost", 19916)
if not sout.good():
print("Unable to connect to GLVis server at localhost:19916")
visualization = False
print("GLVis visualization disabled.")
else:
            sout.precision(8)  # match the 8-digit precision used for the saved grid functions
sout << "solution\n" << mesh << dudt_gf
sout << "pause\n"
sout.flush()
print(
"GLVis visualization paused. Press space (in the GLVis window) to resume it.")
# 8. Perform time-integration (looping over the time iterations, ti, with a
# time-step dt).
ode_solver.Init(oper)
t = 0.0
last_step = False
ti = 0
while not last_step:
ti += 1
if t + dt >= t_final - dt/2:
last_step = True
t, dt = ode_solver.Step(u, dudt, t, dt)
if last_step or (ti % vis_steps == 0):
print("step " + str(ti) + ", t = " + "{:g}".format(t))
u_gf.SetFromTrueDofs(u)
dudt_gf.SetFromTrueDofs(dudt)
if visualization:
sout << "solution\n" << mesh << u_gf
sout.flush()
if visit:
visit_dc.SetCycle(ti)
visit_dc.SetTime(t)
visit_dc.Save()
oper.SetParameters(u)
# 9. Save the final solution. This output can be viewed later using GLVis:
# "glvis -m ex23.mesh -g ex23-final.gf".
output = io.StringIO()
output.precision = 8
u_gf.Save(output)
dudt_gf.Save(output)
fid = open("ex23-final.gf", "w")
fid.write(output.getvalue())
fid.close()
if __name__ == "__main__":
from mfem.common.arg_parser import ArgParser
parser = ArgParser(description="Ex23 (Wave problem)")
parser.add_argument('-m', '--mesh',
default='star.mesh',
action='store', type=str,
help='Mesh file to use.')
parser.add_argument('-r', '--refine',
action='store', default=2, type=int,
help="Number of times to refine the mesh uniformly before parallel")
parser.add_argument('-o', '--order',
action='store', default=2, type=int,
help="Finite element order (polynomial degree)")
help_ode = '\n'.join(["ODE solver: [0--10] \t- GeneralizedAlpha(0.1 * s),",
"11 \t - Average Acceleration,",
"12 \t - Linear Acceleration",
"13 \t- CentralDifference",
"14 \t- FoxGoodwin"])
parser.add_argument('-s', '--ode-solver',
action='store', default=10, type=int,
help=help_ode)
parser.add_argument('-tf', '--t-final',
action='store', default=0.5, type=float,
help="Final time; start time is 0.")
parser.add_argument('-dt', '--time-step',
action='store', default=1e-2, type=float,
help="Time step")
parser.add_argument("-c", "--speed",
action='store', default=1.0, type=float,
help="Wave speed.")
parser.add_argument("-neu", "--neumann",
action='store_true', default=False,
help="BC switch.")
parser.add_argument('-vis', '--visualization',
action='store_true', default=True,
help='Enable GLVis visualization')
parser.add_argument('-visit', '--visit-datafiles',
action='store_true', default=True,
help="Save data files for VisIt (visit.llnl.gov) visualization.")
parser.add_argument("-vs", "--visualization-steps",
action='store', default=5, type=int,
help="Visualize every n-th timestep.")
args = parser.parse_args()
parser.print_options(args)
mesh_file = expanduser(
join(os.path.dirname(__file__), '..', 'data', args.mesh))
run(mesh_file=mesh_file,
ref_levels=args.refine,
order=args.order,
ode_solver_type=args.ode_solver,
t_final=args.t_final,
dt=args.time_step,
speed=args.speed,
dirichlet=(not args.neumann),
visit=args.visit_datafiles,
vis_steps=args.visualization_steps,
visualization=args.visualization)
|
mfem/PyMFEM
|
examples/ex23.py
|
Python
|
bsd-3-clause
| 10,605
|
[
"VisIt"
] |
e09c3a55ccf0f57003fba87bc2593a3ea84600479a864a03cb1073c7329c38f1
|
"""
KeepNote
Notebook indexing
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
from itertools import chain
import os
import sys
import time
import traceback
# import sqlite
try:
import pysqlite2
import pysqlite2.dbapi2 as sqlite
except Exception, e:
import sqlite3 as sqlite
#sqlite.enable_shared_cache(True)
#sqlite.threadsafety = 0
# keepnote imports
import keepnote
import keepnote.notebook
# index filename
INDEX_FILE = u"index.sqlite"
INDEX_VERSION = 3
NULL = object()
def match_words(infile, words):
    """Returns True if all of the words in list 'words' appear in the
    node title or data file"""
matches = dict.fromkeys(words, False)
for line in infile:
line = line.lower()
for word in words:
if word in line:
matches[word] = True
# return True if all words are found (AND)
for val in matches.itervalues():
if not val:
return False
return True
def read_data_as_plain_text(conn, nodeid):
"""Iterates over the lines of the data file as plain text"""
try:
infile = conn.open_file(
nodeid, keepnote.notebook.PAGE_DATA_FILE, "r", codec="utf-8")
for line in keepnote.notebook.read_data_as_plain_text(infile):
yield line
infile.close()
except:
pass
class AttrIndex (object):
"""Indexing information for an attribute"""
def __init__(self, name, type, multivalue=False, index_value=False):
self._name = name
self._type = type
self._table_name = "Attr_" + name
self._index_name = "IdxAttr_" + name + "_nodeid"
self._multivalue = multivalue
self._index_value = index_value
self._index_value_name = "IdxAttr_" + name + "_value"
def get_name(self):
return self._name
def get_table_name(self):
return self._table_name
def is_multivalue(self):
return self._multivalue
def init(self, cur):
"""Initialize attribute index for database"""
# multivalue is not implemented yet
assert not self._multivalue
cur.execute(u"""CREATE TABLE IF NOT EXISTS %s
(nodeid TEXT,
value %s,
UNIQUE(nodeid) ON CONFLICT REPLACE);
""" % (self._table_name, self._type))
cur.execute(u"""CREATE INDEX IF NOT EXISTS %s
ON %s (nodeid);""" % (self._index_name,
self._table_name))
if self._index_value:
cur.execute(u"""CREATE INDEX IF NOT EXISTS %s
ON %s (value);""" % (self._index_value_name,
self._table_name))
def drop(self, cur):
cur.execute(u"DROP TABLE IF EXISTS %s" % self._table_name)
def add_node(self, cur, nodeid, attr):
val = attr.get(self._name, NULL)
if val is not NULL:
self.set(cur, nodeid, val)
def remove_node(self, cur, nodeid):
"""Remove node from index"""
cur.execute(u"DELETE FROM %s WHERE nodeid=?" % self._table_name,
(nodeid,))
def get(self, cur, nodeid):
"""Get information for a node from the index"""
cur.execute(u"""SELECT value FROM %s WHERE nodeid = ?""" %
self._table_name, (nodeid,))
values = [row[0] for row in cur.fetchall()]
# return value
if self._multivalue:
return values
else:
if len(values) == 0:
return None
else:
return values[0]
def set(self, cur, nodeid, value):
"""Set the information for a node in the index"""
# insert new row
cur.execute(u"""INSERT INTO %s VALUES (?, ?)""" % self._table_name,
(nodeid, value))
# TODO: remove uniroot
class NoteBookIndex (object):
"""Index for a NoteBook"""
def __init__(self, conn, index_file):
self._nconn = conn
self._index_file = index_file
self._uniroot = keepnote.notebook.UNIVERSAL_ROOT
self._attrs = {}
# index state/capabilities
self._need_index = False
self._corrupt = False
self._has_fulltext = False
self.con = None # sqlite connection
self.cur = None # sqlite cursor
# start index
self.open()
#-----------------------------------------
# index connection
def open(self, auto_clear=True):
"""Open connection to index"""
try:
self._corrupt = False
self._need_index = False
self.con = sqlite.connect(self._index_file,
#isolation_level="DEFERRED",
isolation_level="IMMEDIATE",
check_same_thread=False)
self.cur = self.con.cursor()
#self.con.execute(u"PRAGMA read_uncommitted = true;")
self.init_index(auto_clear=auto_clear)
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
raise
def close(self):
"""Close connection to index"""
if self.con is not None:
try:
self.con.commit()
self.con.close()
except:
                # close should always happen without propagating errors
pass
self.con = None
self.cur = None
def save(self):
"""Save index"""
try:
mtime = time.time()
self.con.execute("""UPDATE NodeGraph SET mtime = ? WHERE nodeid = ?;""",
(mtime, self._nconn.get_rootid()))
if self.con is not None:
try:
self.con.commit()
except:
self.open()
except Exception, e:
self._on_corrupt(e, sys.exc_info()[2])
def clear(self):
"""Erases database file and reinitializes"""
self.close()
if self._index_file:
if os.path.exists(self._index_file):
os.remove(self._index_file)
self.open(auto_clear=False)
#-----------------------------------------
# index initialization and versioning
def _get_version(self):
"""Get version from database"""
self.con.execute(u"""CREATE TABLE IF NOT EXISTS Version
(version INTEGER, update_date DATE);""")
version = self.con.execute(u"SELECT MAX(version) FROM Version").fetchone()
if version is not None:
version = version[0]
return version
def _set_version(self, version=INDEX_VERSION):
"""Set the version of the database"""
self.con.execute(u"INSERT INTO Version VALUES (?, datetime('now'));",
(version,))
def init_index(self, auto_clear=True):
"""Initialize the tables in the index if they do not exist"""
con = self.con
try:
# check database version
version = self._get_version()
if version is None or version != INDEX_VERSION:
# version does not match, drop all tables
self._drop_tables()
# update version
self._set_version()
self._need_index = True
# init NodeGraph table
con.execute(u"""CREATE TABLE IF NOT EXISTS NodeGraph
(nodeid TEXT,
parentid TEXT,
basename TEXT,
mtime FLOAT,
symlink BOOLEAN,
UNIQUE(nodeid) ON CONFLICT REPLACE);
""")
con.execute(u"""CREATE INDEX IF NOT EXISTS IdxNodeGraphNodeid
ON NodeGraph (nodeid);""")
con.execute(u"""CREATE INDEX IF NOT EXISTS IdxNodeGraphParentid
ON NodeGraph (parentid);""")
# full text table
try:
# test for fts3 availability
con.execute(u"DROP TABLE IF EXISTS fts3test;")
con.execute(
"CREATE VIRTUAL TABLE fts3test USING fts3(col TEXT);")
con.execute("DROP TABLE fts3test;")
# create fulltext table if it does not already exist
if not list(con.execute(u"""SELECT 1 FROM sqlite_master
WHERE name == 'fulltext';""")):
con.execute(u"""CREATE VIRTUAL TABLE
fulltext USING
fts3(nodeid TEXT, content TEXT,
tokenize=porter);""")
self._has_fulltext = True
except Exception, e:
keepnote.log_error(e)
self._has_fulltext = False
# TODO: make an Attr table
# this will let me query whether an attribute is currently being
# indexed and in what table it is in.
#con.execute(u"""CREATE TABLE IF NOT EXISTS AttrDefs
# (attr TEXT,
# type );
# """)
# initialize attribute tables
for attr in self._attrs.itervalues():
attr.init(self.cur)
con.commit()
# check whether index is uptodate
#if not self._need_index:
# self._need_index = self.check_index()
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
keepnote.log_message("reinitializing index '%s'\n" %
self._index_file)
self.clear()
def is_corrupt(self):
"""Return True if database appear corrupt"""
return self._corrupt
def check_index(self):
"""Check filesystem to see if index is up to date"""
keepnote.log_message("checking index... ")
start = time.time()
mtime_index = self.get_mtime()
mtime = keepnote.notebook.connection.fs.last_node_change(
self._nconn._get_node_path(self._nconn.get_rootid()))
keepnote.log_message("%f seconds\n" % (time.time() - start))
return (mtime > mtime_index)
def _on_corrupt(self, error, tracebk=None):
self._corrupt = True
self._need_index = True
# display error
keepnote.log_error(error, tracebk)
# TODO: reload database?
def add_attr(self, attr):
"""Add indexing for a node attribute using AttrIndex"""
self._attrs[attr.get_name()] = attr
if self.cur:
attr.init(self.cur)
return attr
def _drop_tables(self):
"""drop NodeGraph tables"""
self.con.execute(u"DROP TABLE IF EXISTS NodeGraph")
self.con.execute(u"DROP INDEX IF EXISTS IdxNodeGraphNodeid")
self.con.execute(u"DROP INDEX IF EXISTS IdxNodeGraphParentid")
self.con.execute(u"DROP TABLE IF EXISTS fulltext;")
# drop attribute tables
table_names = [x for (x,) in self.con.execute(
u"""SELECT name FROM sqlite_master WHERE name LIKE 'Attr_%'""")]
for table_name in table_names:
self.con.execute(u"""DROP TABLE %s;""" % table_name)
def index_needed(self):
"""Returns True if indexing is needed"""
return self._need_index
def set_index_needed(self, val=True):
self._need_index = val
def has_fulltext_search(self):
return self._has_fulltext
#-------------------------------------
# add/remove nodes from index
# TODO: prevent "unmanaged change detected" warning when doing index_all()
    # Also I think double indexing is occurring
def index_all(self, rootid=None):
"""
Reindex all nodes under 'rootid'
This function returns an iterator which must be iterated to completion.
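        For example (an illustrative sketch; ``index`` is assumed to be an
        open NoteBookIndex attached to the notebook's connection)::
            for nodeid in index.index_all():
                pass  # must run to completion for the reindex to finish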
"""
visit = set()
conn = self._nconn
if rootid is None:
rootid = conn.get_rootid()
def preorder(conn, nodeid):
"""Iterate through nodes in pre-order traversal"""
queue = [nodeid]
while len(queue) > 0:
nodeid = queue.pop()
yield nodeid
queue.extend(
conn._list_children_nodeids(nodeid, _index=False))
# perform indexing
for nodeid in preorder(conn, rootid):
yield nodeid
# record index complete
self._need_index = False
def get_node_mtime(self, nodeid):
self.cur.execute(u"""SELECT mtime FROM NodeGraph
WHERE nodeid=?""", (nodeid,))
row = self.cur.fetchone()
if row:
return row[0]
else:
return 0.0
def set_node_mtime(self, nodeid, mtime=None):
if mtime is None:
mtime = time.time()
self.con.execute("""UPDATE NodeGraph SET mtime = ? WHERE nodeid = ?;""",
(mtime, nodeid))
def get_mtime(self):
"""Get last modification time of the index"""
return os.stat(self._index_file).st_mtime
def add_node(self, nodeid, parentid, basename, attr, mtime):
"""Add a node to the index"""
# TODO: remove single parent assumption
if self.con is None:
return
try:
# get info
if parentid is None:
parentid = self._uniroot
basename = u""
symlink = False
# update nodegraph
self.cur.execute(
u"""INSERT INTO NodeGraph VALUES (?, ?, ?, ?, ?)""",
(nodeid, parentid, basename, mtime, symlink))
# update attrs
for attrindex in self._attrs.itervalues():
attrindex.add_node(self.cur, nodeid, attr)
# update fulltext
infile = read_data_as_plain_text(self._nconn, nodeid)
self.index_node_text(nodeid, attr, infile)
except Exception, e:
            keepnote.log_error("error indexing node %s '%s'" %
(nodeid, attr.get("title", "")))
self._on_corrupt(e, sys.exc_info()[2])
def remove_node(self, nodeid):
"""Remove node from index using nodeid"""
if self.con is None:
return
try:
# delete node
self.cur.execute(u"DELETE FROM NodeGraph WHERE nodeid=?", (nodeid,))
# update attrs
for attr in self._attrs.itervalues():
attr.remove_node(self.cur, nodeid)
# delete children
#for (childid,) in self.cur.execute(
# u"SELECT nodeid FROM NodeGraph WHERE parentid=?", (nodeid,)):
# self.remove_node(childid)
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
def index_node_text(self, nodeid, attr, infile):
try:
text = attr.get("title", "") + "\n" + "".join(infile)
self.insert_text(nodeid, text)
except Exception, e:
keepnote.log_error()
def insert_text(self, nodeid, text):
if not self._has_fulltext:
return
if list(self.cur.execute(u"SELECT 1 FROM fulltext WHERE nodeid = ?",
(nodeid,))):
self.cur.execute(
u"UPDATE fulltext SET content = ? WHERE nodeid = ?;",
(text, nodeid))
else:
self.cur.execute(u"INSERT INTO fulltext VALUES (?, ?);",
(nodeid, text))
#-------------------------
# queries
def get_node_path(self, nodeid):
"""Get node path for a nodeid"""
# TODO: handle multiple parents
visit = set([nodeid])
path = []
parentid = None
try:
while parentid != self._uniroot:
# continue to walk up parent
path.append(nodeid)
self.cur.execute(u"""SELECT nodeid, parentid, basename
FROM NodeGraph
WHERE nodeid=?""", (nodeid,))
row = self.cur.fetchone()
# nodeid is not index
if row is None:
return None
nodeid, parentid, basename = row
# parent has unexpected loop
if parentid in visit:
                    self._on_corrupt(Exception("unexpected parent path loop"))
return None
# walk up
nodeid = parentid
path.reverse()
return path
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
raise
def get_node_filepath(self, nodeid):
"""Get node path for a nodeid"""
# TODO: handle multiple parents
visit = set([nodeid])
path = []
parentid = None
try:
while parentid != self._uniroot:
# continue to walk up parent
self.cur.execute(u"""SELECT nodeid, parentid, basename
FROM NodeGraph
WHERE nodeid=?""", (nodeid,))
row = self.cur.fetchone()
# nodeid is not index
if row is None:
return None
nodeid, parentid, basename = row
if basename != "":
path.append(basename)
# parent has unexpected loop
if parentid in visit:
                    self._on_corrupt(Exception("unexpected parent path loop"))
return None
# walk up
nodeid = parentid
path.reverse()
return path
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
raise
def get_node(self, nodeid):
"""Get node data for a nodeid"""
# TODO: handle multiple parents
try:
self.cur.execute(u"""SELECT nodeid, parentid, basename, mtime
FROM NodeGraph
WHERE nodeid=?""", (nodeid,))
row = self.cur.fetchone()
# nodeid is not index
if row is None:
return None
return {"nodeid": row[0],
"parentid": row[1],
"basename": row[2],
"mtime": row[3]}
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
raise
def has_node(self, nodeid):
"""Returns True if index has node"""
self.cur.execute(u"""SELECT nodeid, parentid, basename, mtime
FROM NodeGraph
WHERE nodeid=?""", (nodeid,))
return self.cur.fetchone() is not None
def list_children(self, nodeid):
try:
self.cur.execute(u"""SELECT nodeid, basename
FROM NodeGraph
WHERE parentid=?""", (nodeid,))
return list(self.cur.fetchall())
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
raise
def has_children(self, nodeid):
try:
self.cur.execute(u"""SELECT nodeid
FROM NodeGraph
WHERE parentid=?""", (nodeid,))
return self.cur.fetchone() != None
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
raise
def search_titles(self, query):
"""Return nodeids of nodes with matching titles"""
if "title" not in self._attrs:
return []
try:
# order titles by exact matches and then alphabetically
self.cur.execute(
u"""SELECT nodeid, value FROM %s WHERE value LIKE ?
ORDER BY value != ?, value """ %
self._attrs["title"].get_table_name(),
(u"%" + query + u"%", query))
return list(self.cur.fetchall())
except sqlite.DatabaseError, e:
self._on_corrupt(e, sys.exc_info()[2])
raise
def get_attr(self, nodeid, key):
"""Query indexed attribute for a node"""
attr = self._attrs.get(key, None)
if attr:
return attr.get(self.cur, nodeid)
else:
return None
def search_contents(self, text):
# TODO: implement fully general fix
# crude cleaning
text = text.replace('"', "")
# fallback if fts3 is not available
if not self._has_fulltext:
words = [x.lower() for x in text.strip().split()]
return self._search_manual(words)
cur = self.con.cursor()
# search db with fts3
try:
res = cur.execute("""SELECT nodeid FROM fulltext
WHERE content MATCH ?;""", (text,))
return (row[0] for row in res)
except:
keepnote.log_error("SQLITE error while performing search")
return []
def _search_manual(self, words):
"""Recursively search nodes under node for occurrence of words"""
keepnote.log_message("manual search")
nodeid = self._nconn.get_rootid()
stack = [nodeid]
while len(stack) > 0:
nodeid = stack.pop()
title = self._nconn.read_node(nodeid).get("title", "").lower()
infile = chain([title],
read_data_as_plain_text(self._nconn, nodeid))
if match_words(infile, words):
yield nodeid
else:
# return frequently so that search does not block long
yield None
children = self._nconn._list_children_nodeids(nodeid)
stack.extend(children)
|
reshadh/Keepnote-LaTeX
|
keepnote/notebook/connection/fs/index.py
|
Python
|
gpl-2.0
| 23,492
|
[
"VisIt"
] |
6e9f8432a585ba48483390bf3187c46978a69678d67c3b5a5442393abc365cd9
|
"""
===========================
Formaldehyde mm-line fitter
===========================
This is a formaldehyde 3_03-2_02 / 3_22-2_21 and 3_03-2_02 / 3_21-2_20 fitter.
It is based entirely on RADEX models.
"""
from __future__ import print_function
import numpy as np
from . import hyperfine
from . import fitter,model#,modelgrid
from astropy.extern.six.moves import xrange
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
try:
from despotic import cloud
Democracy=False
except ImportError:
Democracy=True # Because it's not despotic :D
from astropy.utils.console import ProgressBar
from astropy.table import Table
import warnings
line_names = ['threeohthree','threetwotwo','threetwoone']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
central_freq_dict = {
'threeohthree': 218.222192e9,
'threetwotwo': 218.475632e9,
'threetwoone': 218.760066e9,
}
line_strength_dict={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
relative_strength_total_degeneracy={
'threeohthree': 1.,
'threetwotwo': 1.,
'threetwoone': 1.,
}
freq_dict = central_freq_dict
aval_dict = {
'threeohthree': 2.818e-4,
'threetwotwo': 1.571e-4,
'threetwoone': 1.577e-4,
}
voff_lines_dict = {
'threeohthree': 0.,
'threetwotwo': 0.,
'threetwoone': 0.,
}
formaldehyde_mm_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
freq_dict, line_strength_dict, relative_strength_total_degeneracy)
formaldehyde_mm_vtau_fitter = formaldehyde_mm_vtau.fitter
formaldehyde_mm_vtau_vheight_fitter = formaldehyde_mm_vtau.vheight_fitter
def build_despotic_grids(gridfile='ph2co_grid_despotic.fits', ph2coAbund=1e-8,
nDens=21, logDensLower=2.0, logDensUpper=6.0,
nCol=21, logColLower=11.0, logColUpper=15.0,
nTemp=51, Tlower=10.0, Tupper=300.0,
nDv=5, DvLower=1.0, DvUpper=5.0):
"""
Generates grids of p-H2CO line intensities using Despotic. Outputs a astropy Table.
Parameters
----------
gridfile : string
Name of grid file to output.
ph2coAbund : float
Fractional abundance of p-H2CO
nDens : int
Number of grid points in the volume density
logDensLower : float
log of volume density at lower bound of grid (log(n/cm**-3))
logDensUpper : float
log of volume density at upper bound of grid (log(n/cm**-3))
nCol : int
Number of grid points in the column density
logColLower : float
log of column density of p-H2CO at lower bound of grid (log(N/cm**-2))
logColUpper : float
log of column density of p-H2CO at upper bound of grid (log(N/cm**-2))
nTemp : int
Number of grid points in the temperature grid
    Tlower : float
temperature at lower bound of grid (K)
Tupper : float
temperature at upper bound of grid (K)
nDv : int
Number of grid points in the line width
DvLower : float
line width (non-thermal) at lower bound of grid (km/s)
DvUpper : float
line width (non-thermal) at upper bound of grid (km/s)
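    Example
    -------
    A minimal illustrative call (a deliberately coarse grid so it finishes
    quickly; it still requires a working despotic installation and its
    protostellarCore.desp configuration file)::
        build_despotic_grids('ph2co_grid_small.fits', nDens=3, nCol=3,
                             nTemp=3, nDv=2)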
"""
if Democracy:
raise Exception("No despotic install found. Cannot build grids")
core = cloud(fileName="protostellarCore.desp", verbose=True)
nlower = logDensLower
nupper = logDensUpper
Nlower = logColLower
Nupper = logColUpper
Temps = np.linspace(Tlower, Tupper, nTemp)
Cols = 1e1**np.linspace(Nlower, Nupper, nCol)
Densities = 1e1**(np.linspace(nlower, nupper, nDens))
LineWidth = np.linspace(DvLower, DvUpper, nDv)
outtable = Table(names = ['Tex_303_202', 'Tex_322_221', 'Tex_321_220',
'tau_303_202', 'tau_322_221', 'tau_321_220',
'Temperature', 'Column', 'nH2', 'sigmaNT'])
TempArr, ColArr, DensArr, DvArr = np.meshgrid(Temps,
Cols,
Densities,
LineWidth)
for T, N, n, dv in ProgressBar(zip(TempArr.flatten(),
ColArr.flatten(),
DensArr.flatten(),
DvArr.flatten())):
core.colDen = N/ph2coAbund
core.Tg = T
core.Td = T
core.nH = n
core.sigmaNT = dv
lines = core.lineLum('p-h2co')
outtable.add_row()
outtable[-1]['Tex_303_202'] = lines[2]['Tex']
outtable[-1]['tau_303_202'] = lines[2]['tau']
outtable[-1]['Tex_322_221'] = lines[9]['Tex']
outtable[-1]['tau_322_221'] = lines[9]['tau']
outtable[-1]['Tex_321_220'] = lines[12]['Tex']
outtable[-1]['tau_321_220'] = lines[12]['tau']
outtable[-1]['Temperature'] = T
outtable[-1]['Column'] = N
outtable[-1]['nH2'] = n
outtable[-1]['sigmaNT'] = dv
outtable.write(gridfile, format='fits',overwrite=True)
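# --- Hedged usage sketch (illustrative; not part of the original module) ---
# A minimal example of how the grid above might be built and read back.  The
# small grid sizes and the 'example_ph2co_grid.fits' filename are assumptions
# chosen only to keep runtime short; building any grid still requires a
# working despotic install and the 'protostellarCore.desp' cloud file.
def _example_build_and_read_grid():
    """Illustrative only: build a tiny DESPOTIC grid and read it back."""
    build_despotic_grids(gridfile='example_ph2co_grid.fits',
                         nDens=5, nCol=5, nTemp=5, nDv=2)
    return Table.read('example_ph2co_grid.fits')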
def formaldehyde_mm_despotic_functions(gridtable):
"""
This builds interpolation functions for use in fitting.
Parameters
----------
    gridtable : astropy.table.Table
        Model grid as an astropy Table (e.g., as written by
        `build_despotic_grids`)
Returns
-------
h2co_303_202, h2co_322_221, h2co_321_220 : function
Functions that return the excitation temperature and optical depth given input density,
temperature, column density and line width.
"""
if gridtable is None:
warnings.warn("No gridfile found. Building grids using despotic")
try:
build_despotic_grids('ph2co_grid_despotic.fits')
gridtable = Table.read('ph2co_grid_despotic.fits')
        except Exception: # TODO -- make this more specific
warnings.warn("Failed to build functions because no grids available")
return
DensArr = np.sort(np.unique(gridtable['nH2']))
ColArr = np.sort(np.unique(gridtable['Column']))
TempArr = np.sort(np.unique(gridtable['Temperature']))
DvArr = np.sort(np.unique(gridtable['sigmaNT']))
GridData_Tex_303_202 = np.zeros((len(DensArr), len(ColArr),
len(TempArr), len(DvArr))) + np.nan
GridData_Tex_322_221 = np.zeros((len(DensArr), len(ColArr),
len(TempArr), len(DvArr))) + np.nan
GridData_Tex_321_220 = np.zeros((len(DensArr), len(ColArr),
len(TempArr), len(DvArr))) + np.nan
GridData_tau_303_202 = np.zeros((len(DensArr), len(ColArr),
len(TempArr), len(DvArr))) + np.nan
GridData_tau_322_221 = np.zeros((len(DensArr), len(ColArr),
len(TempArr), len(DvArr))) + np.nan
GridData_tau_321_220 = np.zeros((len(DensArr), len(ColArr),
len(TempArr), len(DvArr))) + np.nan
ii = np.interp(gridtable['nH2'], DensArr, np.arange(len(DensArr))).astype(np.int)
jj = np.interp(gridtable['Column'], ColArr, np.arange(len(ColArr))).astype(np.int)
kk = np.interp(gridtable['Temperature'], TempArr, np.arange(len(TempArr))).astype(np.int)
ll = np.interp(gridtable['sigmaNT'], DvArr, np.arange(len(DvArr))).astype(np.int)
GridData_Tex_303_202[ii, jj, kk, ll] = gridtable['Tex_303_202']
GridData_Tex_322_221[ii, jj, kk, ll] = gridtable['Tex_322_221']
GridData_Tex_321_220[ii, jj, kk, ll] = gridtable['Tex_321_220']
GridData_tau_303_202[ii, jj, kk, ll] = gridtable['tau_303_202']
GridData_tau_322_221[ii, jj, kk, ll] = gridtable['tau_322_221']
GridData_tau_321_220[ii, jj, kk, ll] = gridtable['tau_321_220']
def h2co_303_202(logdensity=4, logcolumn=13, temperature=25, sigmav=2.0):
iidx = np.interp(logdensity, np.log10(DensArr), np.arange(len(DensArr)))
jidx = np.interp(logcolumn, np.log10(ColArr), np.arange(len(ColArr)))
kidx = np.interp(temperature, TempArr, np.arange(len(TempArr)))
lidx = np.interp(sigmav, DvArr, np.arange(len(DvArr)))
xvec = np.array([iidx, jidx, kidx, lidx])
xvec.shape += (1,)
Tex = scipy.ndimage.interpolation.map_coordinates(GridData_Tex_303_202,
xvec)
tau = scipy.ndimage.interpolation.map_coordinates(GridData_tau_303_202,
xvec)
return (Tex, tau)
def h2co_322_221(logdensity=4, logcolumn=13, temperature=25, sigmav=2.0):
iidx = np.interp(logdensity, np.log10(DensArr), np.arange(len(DensArr)))
jidx = np.interp(logcolumn, np.log10(ColArr), np.arange(len(ColArr)))
kidx = np.interp(temperature, TempArr, np.arange(len(TempArr)))
lidx = np.interp(sigmav, DvArr, np.arange(len(DvArr)))
xvec = np.array([iidx, jidx, kidx, lidx])
xvec.shape += (1,)
Tex = scipy.ndimage.interpolation.map_coordinates(GridData_Tex_322_221, xvec)
tau = scipy.ndimage.interpolation.map_coordinates(GridData_tau_322_221, xvec)
return (Tex, tau)
def h2co_321_220(logdensity=4, logcolumn=13, temperature=25, sigmav=2.0):
iidx = np.interp(logdensity, np.log10(DensArr), np.arange(len(DensArr)))
jidx = np.interp(logcolumn, np.log10(ColArr), np.arange(len(ColArr)))
kidx = np.interp(temperature, TempArr, np.arange(len(TempArr)))
lidx = np.interp(sigmav, DvArr, np.arange(len(DvArr)))
xvec = np.array([iidx, jidx, kidx, lidx])
xvec.shape += (1,)
Tex = scipy.ndimage.interpolation.map_coordinates(GridData_Tex_321_220, xvec)
tau = scipy.ndimage.interpolation.map_coordinates(GridData_tau_321_220, xvec)
return (Tex, tau)
return (h2co_303_202, h2co_322_221, h2co_321_220)
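# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Obtain the three interpolator functions from a previously written grid file
# and query them at a single set of physical conditions.  The grid filename
# matches the default used above, but is still an assumption.
def _example_query_despotic_grid():
    """Illustrative only: query the DESPOTIC grid interpolators."""
    grid = Table.read('ph2co_grid_despotic.fits')
    h2co_303, h2co_322, h2co_321 = formaldehyde_mm_despotic_functions(grid)
    # Each interpolator returns (Tex, tau) for the requested conditions.
    tex_303, tau_303 = h2co_303(logdensity=4.5, logcolumn=13.0,
                                temperature=50.0, sigmav=2.0)
    return tex_303, tau_303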
def formaldehyde_mm_despotic(xarr,
temperature=25,
column=13,
density=4,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
h2co_303_202=None,
h2co_322_221=None,
h2co_321_220=None,
debug=False,
verbose=False,
**kwargs):
"""
    Fitter for p-H2CO using DESPOTIC grids. Requires building the grids and
    passing in functions for interpolating the h2co transition optical depths
    and excitation temperatures.
"""
Tex303_202, tau303_202 = h2co_303_202(logdensity=density,
logcolumn=column,
temperature=temperature,
sigmav=width)
Tex322_221, tau322_221 = h2co_322_221(logdensity=density,
logcolumn=column,
temperature=temperature,
sigmav=width)
Tex321_220, tau321_220 = h2co_321_220(logdensity=density,
logcolumn=column,
temperature=temperature,
sigmav=width)
tex = [Tex303_202, Tex322_221, Tex321_220]
tau = [tau303_202, tau322_221, tau321_220]
minfreq = [218.15, 218.40, 218.7]
maxfreq = [218.25, 218.55, 218.8]
spec = np.sum([
(formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),
xoff_v=xoff_v, width=width, **kwargs)
* (xarr.as_unit('GHz').value>minfreq[ii]) *
(xarr.as_unit('GHz').value<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
return spec
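# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Evaluate the DESPOTIC-based model on a frequency axis covering the three
# lines near 218 GHz.  `xarr` is assumed to be a pyspeckit SpectroscopicAxis
# (in Hz) spanning roughly 218.1-218.9 GHz; the physical parameters are
# arbitrary example values.
def _example_despotic_model_spectrum(xarr, h2co_303, h2co_322, h2co_321):
    """Illustrative only: compute a model spectrum from the grid functions."""
    return formaldehyde_mm_despotic(xarr, temperature=50., column=13.5,
                                    density=4.5, xoff_v=0.0, width=2.0,
                                    h2co_303_202=h2co_303,
                                    h2co_322_221=h2co_322,
                                    h2co_321_220=h2co_321)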
def formaldehyde_mm_radex(xarr,
temperature=25,
column=13,
density=4,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
debug=False,
verbose=False,
**kwargs):
"""
    Use a grid of RADEX-computed models to make a model line spectrum.
    The RADEX models have to be available somewhere, OR they can be passed as
    arrays. If passed as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
    xarr must be a SpectroscopicAxis instance.
    xoff_v and width are both in km/s.
Parameters
----------
grid_vwidth : float
the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
    density : float
        log10 of the H2 volume density (the grid's density axis is in log
        units, as encoded in the FITS header)
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
zinds,yinds,xinds = np.indices(taugrid[0].shape)
if 'CD1_1' in hdr:
cd11 = 'CD1_1'
cd22 = 'CD2_2'
else:
cd11 = 'CDELT1'
cd22 = 'CDELT2'
densityarr = (xinds+hdr['CRPIX1']-1)*hdr[cd11]+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr[cd22]+hdr['CRVAL2'] # log column
temparr = (zinds+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # lin temperature
minfreq = (218.,)
maxfreq = (219.,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
zinds,yinds,xinds = np.indices(taugrid[0].shape)
if 'CD1_1' in hdr:
cd11 = 'CD1_1'
cd22 = 'CD2_2'
else:
cd11 = 'CDELT1'
cd22 = 'CDELT2'
densityarr = (xinds+hdr['CRPIX1']-1)*hdr[cd11]+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr[cd22]+hdr['CRVAL2'] # log column
temparr = (zinds+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # lin temperature
else:
raise Exception
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('Hz', quiet=True)
#tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,0,:], xinds[0,0,:])
gridval2 = np.interp(column, columnarr[0,:,0], yinds[0,:,0])
gridval3 = np.interp(temperature, temparr[:,0,0], zinds[:,0,0])
if np.isnan(gridval1) or np.isnan(gridval2) or np.isnan(gridval3):
raise ValueError("Invalid column/density")
if scipyOK:
# this is mostly a trick for speed: slice so you only have two thin layers to interpolate
# between
#slices = [density_gridnumber] + [slice(np.floor(gv),np.floor(gv)+2) for gv in (gridval2,gridval1)]
slices = [slice(np.floor(gridval3),np.floor(gridval3)+2),
slice(np.floor(gridval2),np.floor(gridval2)+2),
slice(np.floor(gridval1),np.floor(gridval1)+2)
]
tau = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval3%1],[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval3%1],[gridval2%1],[gridval1%1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
for ta,tk in zip(tau,tex):
print("density %20.12g temperature %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, temperature, column, ta, tk))
if debug:
import pdb; pdb.set_trace()
spec = np.sum([
(formaldehyde_mm_vtau(xarr, Tex=float(tex[ii]), tau=float(tau[ii]),
xoff_v=xoff_v, width=width, **kwargs)
* (xarr.as_unit('GHz').value>minfreq[ii]) *
(xarr.as_unit('GHz').value<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
return spec
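# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Evaluate the RADEX-grid model by pointing it at FITS grids of Tex and tau.
# The grid filenames are hypothetical, and `xarr` is again assumed to be a
# pyspeckit SpectroscopicAxis covering the 218 GHz lines.
def _example_radex_model_spectrum(xarr):
    """Illustrative only: compute a model spectrum from RADEX grid files."""
    return formaldehyde_mm_radex(xarr, temperature=50., column=13.5,
                                 density=4.5, xoff_v=0.0, width=2.0,
                                 grid_vwidth=1.0,
                                 path_to_texgrid='ph2co_tex_grid.fits',
                                 path_to_taugrid='ph2co_tau_grid.fits')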
def formaldehyde_mm(xarr, amp=1.0, xoff_v=0.0, width=1.0,
return_components=False ):
"""
    Generate a model formaldehyde spectrum based on simple Gaussian parameters.
    The "amplitude" is an essentially arbitrary parameter; we therefore define
    it to be Tex given tau=0.01 when passing to the fitter. The final spectrum
    is then rescaled to that value.
    The components are independent, but with offsets set by frequency... in principle.
"""
mdl = formaldehyde_vtau(xarr, Tex=amp*0.01, tau=0.01, xoff_v=xoff_v,
width=width,
return_components=return_components)
if return_components:
mdlpeak = np.abs(mdl).squeeze().sum(axis=0).max()
else:
mdlpeak = np.abs(mdl).max()
if mdlpeak > 0:
mdl *= amp/mdlpeak
return mdl
class formaldehyde_mm_model(model.SpectralModel):
pass
formaldehyde_mm_fitter = formaldehyde_mm_model(formaldehyde_mm, 3,
parnames=['amp','center','width'],
parlimited=[(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunit='Hz' )
formaldehyde_mm_vheight_fitter = formaldehyde_mm_model(fitter.vheightmodel(formaldehyde_mm), 4,
parnames=['height','amp','center','width'],
parlimited=[(False,False),(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunit='Hz' )
try:
import pymodelfit
class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
return formaldehyde(x,
amp=amp0,
xoff_v=xoff_v0,width=width0)
class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
return formaldehyde_vtau(x,
Tex=Tex0, tau=tau0,
xoff_v=xoff_v0,width=width0)
except ImportError:
pass
|
vlas-sokolov/pyspeckit
|
pyspeckit/spectrum/models/formaldehyde_mm.py
|
Python
|
mit
| 18,938
|
[
"Gaussian"
] |
c54f9a109b285d3988798c22dcd7ae7e340de253a4bd30d8e3fec4a73e705d77
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/usage_reporting/*"""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import utils as common_utils
from common import users
from models import data_removal as models_data_removal
from models import models
from models import student_work
from modules.analytics import student_aggregate
from modules.data_removal import data_removal
from modules.data_removal import removal_models
from modules.gitkit import gitkit
from modules.invitation import invitation
from modules.questionnaire import questionnaire
from modules.review import domain
from modules.review import peer
from modules.skill_map import competency
from modules.unsubscribe import unsubscribe
from tests.functional import actions
from google.appengine.ext import db
class DataRemovalTestBase(actions.TestBase):
def setUp(self):
super(DataRemovalTestBase, self).setUp()
# If the optional wipeout module is present, it will enforce some
# requirements that we're not prepared to construct in core
# Course Builder. Unilaterally remove its registrations.
event_callbacks = models.StudentLifecycleObserver.EVENT_CALLBACKS
for event_type in event_callbacks:
if 'wipeout' in event_callbacks[event_type]:
del event_callbacks[event_type]['wipeout']
enqueue_callbacks = models.StudentLifecycleObserver.EVENT_CALLBACKS
for event_type in enqueue_callbacks:
if 'wipeout' in enqueue_callbacks[event_type]:
del enqueue_callbacks[event_type]['wipeout']
def _unregister_and_request_data_removal(self, course):
response = self.get('/%s/student/home' % course)
response = self.click(response, 'Unenroll')
self.assertIn('to unenroll from', response.body)
form = response.form
form['data_removal'].checked = True
form.action = self.canonicalize(form.action, response)
response = form.submit()
form = response.form
form.action = self.canonicalize(form.action, response)
response = form.submit('data_removal')
self.assertIn('You have been unenrolled', response.body)
def _complete_removal(self):
# Remove indexed items, add to-do items for map/reduce.
task_count = self.execute_all_deferred_tasks(
models.StudentLifecycleObserver.QUEUE_NAME)
# Add map/reduce jobs on default queue
response = self.get(
data_removal.DataRemovalCronHandler.URL,
headers={'X-AppEngine-Cron': 'True'})
# Run map/reduce jobs
self.execute_all_deferred_tasks()
# Final call to cron to do cleanup once map/reduce work items done.
response = self.get(
data_removal.DataRemovalCronHandler.URL,
headers={'X-AppEngine-Cron': 'True'})
class DataRemovalTests(DataRemovalTestBase):
COURSE = 'data_removal_test'
NAMESPACE = 'ns_' + COURSE
ADMIN_EMAIL = 'admin@foo.com'
STUDENT_EMAIL = 'student@foo.com'
def setUp(self):
super(DataRemovalTests, self).setUp()
app_context = actions.simple_add_course(
self.COURSE, self.ADMIN_EMAIL, 'Data Removal Test')
def test_cron_handler_requires_reserved_header(self):
response = self.get(
data_removal.DataRemovalCronHandler.URL, expect_errors=True)
self.assertEquals(403, response.status_int)
self.assertEquals('Forbidden.', response.body)
def test_cron_handler_ok_when_no_work_to_do(self):
response = self.get(
data_removal.DataRemovalCronHandler.URL,
headers={'X-AppEngine-Cron': 'True'})
self.assertEquals(200, response.status_int)
self.assertEquals('OK.', response.body)
def test_non_removal_policy(self):
with actions.OverriddenEnvironment({
data_removal.DATA_REMOVAL_SETTINGS_SECTION: {
data_removal.REMOVAL_POLICY:
data_removal.IndefiniteRetentionPolicy.get_name()}}):
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL, course=self.COURSE)
with common_utils.Namespace(self.NAMESPACE):
# After registration, we should have a student object, and no
# ImmediateRemovalState instance due to the don't-care policy.
student = models.Student.get_by_user(user)
self.assertIsNotNone(student)
self.assertIsNone(
removal_models.ImmediateRemovalState.get_by_user_id(
student.user_id))
r = removal_models.BatchRemovalState.get_by_user_ids(
[student.user_id])
self.assertEqual([None], r)
actions.unregister(self, course=self.COURSE)
# Expect to see register, unregister events on queue.
task_count = self.execute_all_deferred_tasks(
models.StudentLifecycleObserver.QUEUE_NAME)
self.assertEquals(2, task_count)
# Running deletion cycle should have no effect. Verify that.
self._complete_removal()
with common_utils.Namespace(self.NAMESPACE):
# After unregister, we should still have a student object.
student = models.Student.get_by_user(user)
self.assertIsNotNone(student)
self.assertIsNone(
removal_models.ImmediateRemovalState.get_by_user_id(
student.user_id))
r = removal_models.BatchRemovalState.get_by_user_ids(
[student.user_id])
self.assertEqual([None], r)
def test_immediate_removal_policy(self):
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL, course=self.COURSE)
task_count = self.execute_all_deferred_tasks(
models.StudentLifecycleObserver.QUEUE_NAME)
self.assertEquals(1, task_count) # registration.
user_id = None
with common_utils.Namespace(self.NAMESPACE):
# After registration, we should have a student object, and
# a ImmediateRemovalState instance, and no to-do deletion work.
student = models.Student.get_by_user(user)
self.assertIsNotNone(student)
user_id = student.user_id
removal_state = removal_models.ImmediateRemovalState.get_by_user_id(
user_id)
self.assertIsNotNone(removal_state)
self.assertEquals(
removal_models.ImmediateRemovalState.STATE_REGISTERED,
removal_state.state)
r = removal_models.BatchRemovalState.get_by_user_ids([user_id])
self.assertEqual([None], r)
# Add an EventEntity record so we can see it being removed.
event = models.EventEntity(user_id=user_id, source='test')
event.put()
self._unregister_and_request_data_removal(self.COURSE)
with common_utils.Namespace(self.NAMESPACE):
# Immediately upon unregistration, we should still have the student
# record, and removal state should be pending deletion.
student = models.Student.get_by_user(user)
self.assertIsNotNone(student)
removal_state = removal_models.ImmediateRemovalState.get_by_user_id(
user_id)
self.assertIsNotNone(removal_state)
self.assertEquals(
removal_models.ImmediateRemovalState.STATE_DELETION_PENDING,
removal_state.state)
r = removal_models.BatchRemovalState.get_by_user_ids([user_id])
self.assertEqual([None], r)
events = list(models.EventEntity.all().run())
self.assertEquals(1, len(events))
# We should have gotten a to-do item on the task queue for student
# removal.
task_count = self.execute_all_deferred_tasks(
models.StudentLifecycleObserver.QUEUE_NAME)
self.assertEquals(1, task_count) # unregistration.
with common_utils.Namespace(self.NAMESPACE):
# Having processed the queue item, the student record should now
# be gone.
students = list(models.Student.all().run())
student = models.Student.get_by_user(user)
self.assertIsNone(student)
# But the record tracking removal should not yet be gone.
removal_state = removal_models.ImmediateRemovalState.get_by_user_id(
user_id)
self.assertIsNotNone(removal_state)
self.assertEquals(
removal_models.ImmediateRemovalState.STATE_DELETION_PENDING,
removal_state.state)
# And we should have a to-do item for the cron batch cleanup.
r = removal_models.BatchRemovalState.get_by_user_ids([user_id])
self.assertEquals(1, len(r))
removal_record = r[0]
self.assertEquals(
models_data_removal.Registry.get_unindexed_class_names(),
removal_record.resource_types)
# Events won't have been cleaned up yet; need cron batch to run.
events = list(models.EventEntity.all().run())
self.assertEquals(1, len(events))
# Call the cron handler to schedule batch removal tasks. This, in
# turn, will schedule map/reduce jobs to remove records for that
# student.
response = self.get(
data_removal.DataRemovalCronHandler.URL,
headers={'X-AppEngine-Cron': 'True'})
self.assertEquals(200, response.status_int)
self.assertEquals('OK.', response.body)
# Run the map/reduce jobs to completion.
self.execute_all_deferred_tasks()
# We should now be nearly clean; in the normal course of events, only
# the ImmediateRemovalState should still be present. However, due to
# race conditions, an analysis map/reduce job may have finished in the
# meantime, and written a per-student record. Add such a record.
with common_utils.Namespace(self.NAMESPACE):
student = models.Student.get_by_user(user)
self.assertIsNone(student)
removal_state = removal_models.ImmediateRemovalState.get_by_user_id(
user_id)
self.assertIsNotNone(removal_state)
# Events should now be gone.
events = list(models.EventEntity.all().run())
self.assertEquals(0, len(events))
# Cron batch cleanup record should be present, but now empty.
r = removal_models.BatchRemovalState.get_by_user_ids([user_id])
self.assertEquals(1, len(r))
removal_record = r[0]
self.assertEquals([], removal_record.resource_types)
            # Simulate map/reduce finishing asynchronously & adding a per-student
# item. Verify that the record is present so we know the test
# below that checks for it being gone is correct.
student_aggregate.StudentAggregateEntity(key_name=user_id).put()
a = student_aggregate.StudentAggregateEntity.get_by_key_name(
user_id)
self.assertIsNotNone(a)
# Call the cron handler one more time. Because the batch work item
# is empty, this should do one more round of cleanup on items indexed
# by user id.
response = self.get(
data_removal.DataRemovalCronHandler.URL,
headers={'X-AppEngine-Cron': 'True'})
self.assertEquals(200, response.status_int)
self.assertEquals('OK.', response.body)
# We should now have zero data about the user.
with common_utils.Namespace(self.NAMESPACE):
student = models.Student.get_by_user(user)
self.assertIsNone(student)
removal_state = removal_models.ImmediateRemovalState.get_by_user_id(
user_id)
self.assertIsNone(removal_state)
# Events should now be gone.
events = list(models.EventEntity.all().run())
self.assertEquals(0, len(events))
# Cron batch cleanup record should be gone.
r = removal_models.BatchRemovalState.get_by_user_ids([user_id])
self.assertEqual([None], r)
# Map/reduce results should be gone.
a = student_aggregate.StudentAggregateEntity.get_by_key_name(
user_id)
self.assertIsNone(a)
def test_multiple_students(self):
# Register two students
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, user.email(), course=self.COURSE)
other_user = actions.login('student002@foo.com')
actions.register(self, other_user.email(), course=self.COURSE)
# Get IDs of those students; make an event for each.
with common_utils.Namespace(self.NAMESPACE):
student1_id = (
models.Student.get_by_user(user).user_id)
student2_id = (
models.Student.get_by_user(other_user).user_id)
models.EventEntity(user_id=student1_id, source='test').put()
models.EventEntity(user_id=student2_id, source='test').put()
# Unregister one of them.
actions.login(self.STUDENT_EMAIL)
self._unregister_and_request_data_removal(self.COURSE)
self._complete_removal()
# Unregistered student and his data are gone; still-registered
# student's data is still present.
with common_utils.Namespace(self.NAMESPACE):
self.assertIsNone(models.Student.get_by_user(user))
self.assertIsNotNone(models.Student.get_by_user(other_user))
entities = list(models.EventEntity.all().run())
self.assertEquals(1, len(entities))
self.assertEquals(student2_id, entities[0].user_id)
def test_multiple_courses(self):
COURSE_TWO = 'course_two'
COURSE_TWO_NS = 'ns_' + COURSE_TWO
# Slight cheat: Register gitkit data remover manually, rather than
# enabling the entire module, which disrupts normal functional test
# user login handling
gitkit.EmailMapping.register_for_data_removal()
actions.simple_add_course(
COURSE_TWO, self.ADMIN_EMAIL, 'Data Removal Test Two')
user = actions.login(self.STUDENT_EMAIL)
with actions.OverriddenConfig(models.CAN_SHARE_STUDENT_PROFILE.name,
True):
actions.register(self, user.email(), course=self.COURSE)
actions.register(self, user.email(), course=COURSE_TWO)
# Slight cheat: Rather than enabling gitkit module, just call
# the method that will insert the EmailMapping row.
gitkit.EmailUpdatePolicy.apply(user)
# Global profile object(s) should now exist.
profile = models.StudentProfileDAO.get_profile_by_user_id(
user.user_id())
self.assertIsNotNone(profile)
email_policy = gitkit.EmailMapping.get_by_user_id(user.user_id())
self.assertIsNotNone(email_policy)
# Unregister from 'data_removal_test' course.
self._unregister_and_request_data_removal(self.COURSE)
self._complete_removal()
# Student object should be gone from data_removal_test course, but
# not from course_two.
with common_utils.Namespace(self.NAMESPACE):
self.assertIsNone(models.Student.get_by_user(user))
with common_utils.Namespace(COURSE_TWO_NS):
self.assertIsNotNone(models.Student.get_by_user(user))
# Global profile object(s) should still exist.
profile = models.StudentProfileDAO.get_profile_by_user_id(
user.user_id())
self.assertIsNotNone(profile)
email_policy = gitkit.EmailMapping.get_by_user_id(user.user_id())
self.assertIsNotNone(email_policy)
# Unregister from other course.
self._unregister_and_request_data_removal(COURSE_TWO)
self._complete_removal()
# Both Student objects should now be gone.
with common_utils.Namespace(self.NAMESPACE):
self.assertIsNone(models.Student.get_by_user(user))
with common_utils.Namespace(COURSE_TWO_NS):
self.assertIsNone(models.Student.get_by_user(user))
# Global profile object(s) should also be gone.
profile = models.StudentProfileDAO.get_profile_by_user_id(
user.user_id())
self.assertIsNone(profile)
email_policy = gitkit.EmailMapping.get_by_user_id(user.user_id())
self.assertIsNone(email_policy)
def test_records_indexed_by_user_id_removed(self):
"""Test a sampling of types whose index is or contains the user ID."""
user_id = None
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL, course=self.COURSE)
# Get IDs of those students; make an event for each.
with common_utils.Namespace(self.NAMESPACE):
student = models.Student.get_by_user(user)
user_id = student.user_id
# Indexed by user ID suffixed with a string.
p = models.StudentPropertyEntity.create(student, 'foo')
p.value = 'foo'
p.put()
invitation.InvitationStudentProperty.load_or_default(student).put()
questionnaire.StudentFormEntity.load_or_default(
student, 'a_form').put()
# User ID plus skill name.
cm = competency.BaseCompetencyMeasure.load(user_id, 1)
cm.save()
# models.student_work.KeyProperty - a foreign key to Student.
reviewee_key = db.Key.from_path(models.Student.kind(), user_id)
reviewer_key = db.Key.from_path(models.Student.kind(), 'xyzzy')
student_work.Review(contents='abcdef', reviewee_key=reviewee_key,
reviewer_key=reviewer_key, unit_id='7').put()
submission_key = student_work.Submission(
unit_id='7', reviewee_key=reviewee_key).put()
peer.ReviewSummary(submission_key=submission_key,
reviewee_key=reviewee_key, unit_id='7').put()
peer.ReviewStep(
submission_key=submission_key, reviewee_key=reviewee_key,
reviewer_key=reviewer_key, unit_id='7',
state=domain.REVIEW_STATE_ASSIGNED,
assigner_kind=domain.ASSIGNER_KIND_AUTO).put()
# Assure ourselves that we have all of the items we just added.
with common_utils.Namespace(self.NAMESPACE):
l = list(models.StudentPropertyEntity.all().run())
self.assertEquals(2, len(l)) # 'foo', 'linear-course-completion'
l = list(invitation.InvitationStudentProperty.all().run())
self.assertEquals(1, len(l))
l = list(questionnaire.StudentFormEntity.all().run())
self.assertEquals(1, len(l))
l = list(competency.CompetencyMeasureEntity.all().run())
self.assertEquals(1, len(l))
l = list(student_work.Review.all().run())
self.assertEquals(1, len(l))
l = list(student_work.Submission.all().run())
self.assertEquals(1, len(l))
l = list(peer.ReviewSummary.all().run())
self.assertEquals(1, len(l))
l = list(peer.ReviewStep.all().run())
self.assertEquals(1, len(l))
self._unregister_and_request_data_removal(self.COURSE)
self._complete_removal()
# Assure ourselves that all added items are now gone.
with common_utils.Namespace(self.NAMESPACE):
l = list(models.StudentPropertyEntity.all().run())
self.assertEquals(0, len(l))
l = list(invitation.InvitationStudentProperty.all().run())
self.assertEquals(0, len(l))
l = list(questionnaire.StudentFormEntity.all().run())
self.assertEquals(0, len(l))
l = list(competency.CompetencyMeasureEntity.all().run())
self.assertEquals(0, len(l))
l = list(student_work.Review.all().run())
self.assertEquals(0, len(l))
l = list(student_work.Submission.all().run())
self.assertEquals(0, len(l))
l = list(peer.ReviewSummary.all().run())
self.assertEquals(0, len(l))
l = list(peer.ReviewStep.all().run())
self.assertEquals(0, len(l))
def test_remove_by_email(self):
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, user.email(), course=self.COURSE)
# Get IDs of those students; make an event for each.
with common_utils.Namespace(self.NAMESPACE):
sse = unsubscribe.SubscriptionStateEntity(
key_name=user.email())
sse.save()
self._unregister_and_request_data_removal(self.COURSE)
self._complete_removal()
with common_utils.Namespace(self.NAMESPACE):
l = list(unsubscribe.SubscriptionStateEntity.all().run())
self.assertEquals(0, len(l))
class UserInteractionTests(DataRemovalTestBase):
COURSE = 'data_removal_test'
NAMESPACE = 'ns_' + COURSE
ADMIN_EMAIL = 'admin@foo.com'
STUDENT_EMAIL = 'student@foo.com'
def setUp(self):
super(UserInteractionTests, self).setUp()
app_context = actions.simple_add_course(
self.COURSE, self.ADMIN_EMAIL, 'Data Removal Test')
self.base = '/' + self.COURSE
def test_unregister_hides_deletion_option_when_no_deletion_policy(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
with actions.OverriddenEnvironment({
data_removal.DATA_REMOVAL_SETTINGS_SECTION: {
data_removal.REMOVAL_POLICY:
data_removal.IndefiniteRetentionPolicy.get_name()}}):
response = self.get('student/unenroll')
self.assertNotIn('Remove all my data from the course', response.body)
def test_unregister_shows_deletion_option_when_deletion_possible(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('student/unenroll')
self.assertIn('Delete all associated data', response.body)
def test_unregister_without_deletion_permits_reregistration(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
actions.unregister(self)
actions.register(self, self.STUDENT_EMAIL)
def _unregister_flow(self, response,
with_deletion_checked=False,
cancel_on_unregister=False,
cancel_on_deletion=False):
unregistration_expected = (not cancel_on_unregister and
not cancel_on_deletion)
data_deletion_expected = (unregistration_expected and
with_deletion_checked)
# Caller should have arranged for us to be at the unregister form.
form = response.form
if with_deletion_checked:
form['data_removal'].checked = True
if cancel_on_unregister:
response = self.click(response, "No")
return response
# Submit unregister form.
response = form.submit()
if with_deletion_checked:
self.assertIn(
'Once you delete your data, there is no way to recover it.',
response.body)
form = response.form
form.action = self.canonicalize(form.action, response)
if cancel_on_deletion:
response = form.submit('cancel_removal').follow()
self.assertIn(
'To leave the course permanently, click on Unenroll',
response.body)
else:
response = form.submit('data_removal')
self.assertIn('You have been unenrolled', response.body)
# Try to visit student's profile - verify can or can't depending
# on whether we unregistered the student.
response = self.get('student/home')
if unregistration_expected:
self.assertEquals(response.status_int, 302)
self.assertEquals(response.location,
'http://localhost/%s/course' % self.COURSE)
response = response.follow()
self.assertEquals(response.status_int, 200)
else:
self.assertEquals(response.status_int, 200) # not 302 to /course
        # Run the pipeline that might do the deletion, so the code has had
        # the opportunity to delete before we check whether the Student
        # record is gone or not.
self._complete_removal()
with common_utils.Namespace(self.NAMESPACE):
user = users.get_current_user()
if data_deletion_expected:
self.assertIsNone(models.Student.get_by_user(user))
else:
self.assertIsNotNone(models.Student.get_by_user(user))
def _deletion_flow_for_unregistered_student(self, response, cancel):
self.assertIn(
'Once you delete your data, there is no way to recover it.',
response.body)
form = response.form
form.action = self.canonicalize(form.action, response)
if cancel:
response = form.submit('cancel_removal')
# Verify redirected back to /course page in either case.
self.assertEquals(response.status_int, 302)
self.assertEquals(response.location,
'http://localhost/%s/student/home' % self.COURSE)
response = response.follow()
self.assertEquals(response.status_int, 302)
self.assertEquals(response.location,
'http://localhost/%s/course' % self.COURSE)
response = response.follow()
self.assertEquals(response.status_int, 200)
else:
response = form.submit('data_removal')
self.assertEquals(response.status_int, 302)
self.assertEquals(response.location,
'http://localhost/%s/' % self.COURSE)
response = response.follow()
self.assertEquals(response.status_int, 200)
            # Run the pipeline that might do the deletion, so the code has had
            # the opportunity to delete before we check whether the Student
            # record is gone or not.
self._complete_removal()
with common_utils.Namespace(self.NAMESPACE):
user = users.get_current_user()
if cancel:
self.assertIsNotNone(models.Student.get_by_user(user))
else:
self.assertIsNone(models.Student.get_by_user(user))
def test_unregister_then_cancel_does_not_unregister_or_delete(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('student/unenroll')
self._unregister_flow(response, cancel_on_unregister=True)
def test_unregister_without_deletion_unregisters_but_does_not_delete(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('student/unenroll')
self._unregister_flow(response)
def test_unregister_with_deletion_then_cancel_does_not_unregister(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('student/unenroll')
self._unregister_flow(response, with_deletion_checked=True,
cancel_on_deletion=True)
def test_unregister_with_deletion_does_deletion(self):
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('student/unenroll')
self._unregister_flow(response, with_deletion_checked=True)
def test_delete_link_in_footer_not_present_when_not_logged_in(self):
response = self.get('course')
self.assertNotIn('Delete My Data', response.body)
def test_delete_link_in_footer_not_present_when_not_registered(self):
actions.login(self.STUDENT_EMAIL)
response = self.get('course')
self.assertNotIn('Delete My Data', response.body)
def test_delete_link_when_registered_then_cancel_unregister(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('course')
response = self.click(response, 'Delete My Data')
self._unregister_flow(response, cancel_on_unregister=True)
def test_delete_link_when_registered_then_cancel_deletion(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('course')
response = self.click(response, 'Delete My Data')
self._unregister_flow(response, with_deletion_checked=True,
cancel_on_deletion=True)
def test_delete_link_when_registered_then_unregister_without_deletion(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('course')
response = self.click(response, 'Delete My Data')
self._unregister_flow(response)
def test_delete_link_when_registered_then_proceed_and_delete(self):
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
response = self.get('course')
response = self.click(response, 'Delete My Data')
self._unregister_flow(response, with_deletion_checked=True)
def test_delete_link_when_unregistered_then_cancel(self):
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
actions.unregister(self)
response = self.get('course')
response = self.click(response, 'Delete My Data')
self._deletion_flow_for_unregistered_student(response, cancel=True)
response = self.get('course')
self.assertIn('Delete My Data', response.body)
def test_delete_link_when_unregistered_then_proceed(self):
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
actions.unregister(self)
response = self.get('course')
response = self.click(response, 'Delete My Data')
self._deletion_flow_for_unregistered_student(response, cancel=False)
response = self.get('course')
self.assertNotIn('Delete My Data', response.body)
def test_reregistration_blocked_during_deletion(self):
def assert_cannot_register():
response = self.get('register')
self.assertIn('You cannot re-register for this course',
response.body)
self.assertNotIn('What is your name?', response.body)
user_id = None
user = actions.login(self.STUDENT_EMAIL)
actions.register(self, user.email())
with common_utils.Namespace(self.NAMESPACE):
# After registration, we should have a student object, and
# a ImmediateRemovalState instance.
student = models.Student.get_by_user(user)
self.assertIsNotNone(student)
user_id = student.user_id
self._unregister_and_request_data_removal(self.COURSE)
# On submitting the unregister form, the user's ImmediateRemovalState
        # will have been marked as deletion-in-progress, so the user cannot
        # re-register yet.
assert_cannot_register()
# Run the queue to do the cleanup of indexed items, and add the
# work-to-do items for batched cleanup.
self.execute_all_deferred_tasks(
models.StudentLifecycleObserver.QUEUE_NAME)
assert_cannot_register()
# Run the cron job that launches the map/reduce jobs to clean up
# bulk items. Still not able to re-register.
self.get(
data_removal.DataRemovalCronHandler.URL,
headers={'X-AppEngine-Cron': 'True'})
assert_cannot_register()
# Run the map/reduce jobs. Bulk items should now be cleaned.
self.execute_all_deferred_tasks()
with common_utils.Namespace(self.NAMESPACE):
student = models.Student.get_by_user(user)
self.assertIsNone(student)
removal_state = removal_models.ImmediateRemovalState.get_by_user_id(
user_id)
self.assertIsNotNone(removal_state)
assert_cannot_register()
# Run the cron job one more time. When no bulk to-do items remain,
# we then clean up the ImmediateRemovalState. Re-registration should
# now be possible.
self.get(
data_removal.DataRemovalCronHandler.URL,
headers={'X-AppEngine-Cron': 'True'})
with common_utils.Namespace(self.NAMESPACE):
student = models.Student.get_by_user(user)
self.assertIsNone(student)
removal_state = removal_models.ImmediateRemovalState.get_by_user_id(
user_id)
self.assertIsNone(removal_state)
actions.register(self, self.STUDENT_EMAIL)
|
ram8647/gcb-mobilecsp
|
modules/data_removal/data_removal_tests.py
|
Python
|
apache-2.0
| 34,158
|
[
"VisIt"
] |
bee44c890d9e597bf7726acc483a6e519999e6017aff96beeffe81acab9ae0d7
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkTIFFReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkTIFFReader(), 'Reading vtkTIFF.',
(), ('vtkTIFF',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkTIFFReader.py
|
Python
|
bsd-3-clause
| 468
|
[
"VTK"
] |
c965c1b094242d229b47b0e17308b5186061a7334a05421c16f974468f979783
|
# encoding: utf-8
from __future__ import unicode_literals
import json
import mock
import datetime
import pytest
import responses
from nose.tools import * # noqa
from dateutil.parser import parse as parse_datetime
from website import settings
from addons.osfstorage.models import OsfStorageFileNode, OsfStorageFolder
from framework.auth.core import Auth
from addons.osfstorage.tests.utils import (
StorageTestCase, Delta, AssertDeltas,
recursively_create_file,
)
from addons.osfstorage.tests import factories
from addons.osfstorage.tests.utils import make_payload
from framework.auth import signing
from website.util import rubeus, api_url_for
from framework.auth import cas
from osf.models import Tag, QuickFilesNode
from osf.models import files as models
from addons.osfstorage.apps import osf_storage_root
from addons.osfstorage import utils
from addons.base.views import make_auth
from addons.osfstorage import settings as storage_settings
from api_tests.utils import create_test_file
from osf_tests.factories import ProjectFactory, ApiOAuth2PersonalTokenFactory, PreprintFactory
def create_record_with_version(path, node_settings, **kwargs):
version = factories.FileVersionFactory(**kwargs)
    record = node_settings.get_root().append_file(path)
record.versions.append(version)
record.save()
return record
@pytest.mark.django_db
class HookTestCase(StorageTestCase):
def send_hook(self, view_name, view_kwargs, payload, target, method='get', **kwargs):
method = getattr(self.app, method)
guid = view_kwargs.pop('guid', None) or target._id
return method(
api_url_for(view_name, guid=guid, **view_kwargs),
signing.sign_data(signing.default_signer, payload),
**kwargs
)
@pytest.mark.django_db
class TestGetMetadataHook(HookTestCase):
def test_empty(self):
res = self.send_hook(
'osfstorage_get_children',
{'fid': self.node_settings.get_root()._id, 'user_id': self.user._id},
{},
self.node
)
assert_true(isinstance(res.json, list))
assert_equal(res.json, [])
def test_file_metdata(self):
path = u'kind/of/magíc.mp3'
record = recursively_create_file(self.node_settings, path)
version = factories.FileVersionFactory()
record.versions.add(version)
record.save()
res = self.send_hook(
'osfstorage_get_metadata',
{'fid': record.parent._id},
{},
self.node
)
assert_true(isinstance(res.json, dict))
assert_equal(res.json, record.parent.serialize(True))
def test_children_metadata(self):
path = u'kind/of/magíc.mp3'
record = recursively_create_file(self.node_settings, path)
version = factories.FileVersionFactory()
record.versions.add(version)
record.save()
res = self.send_hook(
'osfstorage_get_children',
{'fid': record.parent._id, 'user_id': self.user._id},
{},
self.node
)
assert_equal(len(res.json), 1)
res_data = res.json[0]
expected_data = record.serialize()
# Datetimes in response might not be exactly the same as in record.serialize
# because of the way Postgres serializes dates. For example,
# '2017-06-05T17:32:20.964950+00:00' will be
# serialized as '2017-06-05T17:32:20.96495+00:00' by postgres
# Therefore, we parse the dates then compare them
expected_date_modified = parse_datetime(expected_data.pop('modified'))
expected_date_created = parse_datetime(expected_data.pop('created'))
res_date_modified = parse_datetime(res_data.pop('modified'))
res_date_created = parse_datetime(res_data.pop('created'))
# latestVersionSeen should not be present in record.serialize, because it has to do
# with the user making the request itself, which isn't important when serializing the record
expected_data['latestVersionSeen'] = None
assert_equal(res_date_modified, expected_date_modified)
assert_equal(res_date_created, expected_date_created)
assert_equal(res_data, expected_data)
def test_osf_storage_root(self):
auth = Auth(self.project.creator)
result = osf_storage_root(self.node_settings.config, self.node_settings, auth)
node = self.project
expected = rubeus.build_addon_root(
node_settings=self.node_settings,
name='',
permissions=auth,
user=auth.user,
nodeUrl=node.url,
nodeApiUrl=node.api_url,
)
root = result[0]
assert_equal(root, expected)
def test_root_default(self):
res = self.send_hook('osfstorage_get_metadata', {}, {}, self.node)
assert_equal(res.json['fullPath'], '/')
assert_equal(res.json['id'], self.node_settings.get_root()._id)
def test_metadata_not_found(self):
res = self.send_hook(
'osfstorage_get_metadata',
{'fid': 'somebogusid'}, {},
self.node,
expect_errors=True,
)
assert_equal(res.status_code, 404)
def test_metadata_not_found_lots_of_slashes(self):
res = self.send_hook(
'osfstorage_get_metadata',
{'fid': '/not/fo/u/nd/'}, {},
self.node,
expect_errors=True,
)
assert_equal(res.status_code, 404)
@pytest.mark.django_db
class TestUploadFileHook(HookTestCase):
def setUp(self):
super(TestUploadFileHook, self).setUp()
self.name = 'pízza.png'
self.record = recursively_create_file(self.node_settings, self.name)
self.auth = make_auth(self.user)
def send_upload_hook(self, parent, target=None, payload=None, **kwargs):
return self.send_hook(
'osfstorage_create_child',
{'fid': parent._id},
payload=payload or {},
target=target or self.project,
method='post_json',
**kwargs
)
def make_payload(self, **kwargs):
user = kwargs.pop('user', self.user)
name = kwargs.pop('name', self.name)
return make_payload(user=user, name=name, **kwargs)
def test_upload_create(self):
name = 'slightly-mad'
res = self.send_upload_hook(self.node_settings.get_root(), self.project, self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
record = self.node_settings.get_root().find_child_by_name(name)
version = models.FileVersion.load(res.json['version'])
assert_equal(version.size, 123)
assert_equal(version.location_hash, 'file')
assert_equal(version.location, {
'object': 'file',
'uname': 'testmachine',
'service': 'filesystem',
'provider': 'filesystem',
storage_settings.WATERBUTLER_RESOURCE: 'blah',
})
assert_equal(version.metadata, {
'size': 123,
'name': 'file',
'base64': '==',
'provider': 'filesystem',
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'
})
assert_is_not(version, None)
assert_equal([version], list(record.versions.all()))
assert_not_in(version, self.record.versions.all())
assert_equal(record.serialize(), res.json['data'])
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
def test_upload_update(self):
delta = Delta(lambda: self.record.versions.count(), lambda value: value + 1)
with AssertDeltas(delta):
res = self.send_upload_hook(self.node_settings.get_root(), self.project, self.make_payload())
self.record.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, self.record.versions.all())
def test_upload_duplicate(self):
location = {
'service': 'cloud',
storage_settings.WATERBUTLER_RESOURCE: 'osf',
'object': 'file',
}
version = self.record.create_version(self.user, location)
with AssertDeltas(Delta(lambda: self.record.versions.count())):
res = self.send_upload_hook(self.node_settings.get_root(), payload=self.make_payload())
self.record.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, self.record.versions.all())
def test_upload_create_child(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_not_in(version, self.record.versions.all())
record = parent.find_child_by_name(name)
assert_in(version, record.versions.all())
assert_equals(record.name, name)
assert_equals(record.parent, parent)
def test_upload_create_child_with_same_name(self):
name = 'ლ(ಠ益ಠლ).unicode'
self.node_settings.get_root().append_file(name)
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], self.record.get_download_count())
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_not_in(version, self.record.versions.all())
record = parent.find_child_by_name(name)
assert_in(version, record.versions.all())
assert_equals(record.name, name)
assert_equals(record.parent, parent)
def test_upload_fail_to_create_version_due_to_checkout(self):
user = factories.AuthUserFactory()
name = 'Gunter\'s noise.mp3'
self.node_settings.get_root().append_file(name)
root = self.node_settings.get_root()
file = root.find_child_by_name(name)
file.checkout = user
file.save()
res = self.send_upload_hook(root, payload=self.make_payload(name=name), expect_errors=True)
assert_equal(res.status_code, 403)
def test_update_nested_child(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.node_settings.get_root().append_folder('cheesey')
old_node = parent.append_file(name)
res = self.send_upload_hook(parent, payload=self.make_payload(name=name))
old_node.reload()
new_node = parent.find_child_by_name(name)
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
assert_equal(res.json['data']['downloads'], new_node.get_download_count())
assert_equal(old_node, new_node)
version = models.FileVersion.load(res.json['version'])
assert_is_not(version, None)
assert_in(version, new_node.versions.all())
assert_in(version, new_node.versions.all())
assert_equals(new_node.name, name)
assert_equals(new_node.parent, parent)
def test_upload_weird_name(self):
name = 'another/dir/carpe.png'
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name), expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(len(parent.children), 0)
def test_upload_to_file(self):
name = 'carpe.png'
parent = self.node_settings.get_root().append_file('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name), expect_errors=True)
assert_true(parent.is_file)
assert_equal(res.status_code, 400)
def test_upload_no_data(self):
res = self.send_upload_hook(self.node_settings.get_root(), expect_errors=True)
assert_equal(res.status_code, 400)
def test_archive(self):
name = 'ლ(ಠ益ಠლ).unicode'
parent = self.node_settings.get_root().append_folder('cheesey')
res = self.send_upload_hook(parent, payload=self.make_payload(name=name, hashes={'sha256': 'foo'}))
assert_equal(res.status_code, 201)
assert_equal(res.json['status'], 'success')
assert_is(res.json['archive'], True)
res = self.send_hook(
'osfstorage_update_metadata',
{},
target=self.project,
payload={'metadata': {
'vault': 'Vault 101',
'archive': '101 tluaV',
}, 'version': res.json['version']},
method='put_json',
)
res = self.send_upload_hook(parent, payload=self.make_payload(
name=name,
hashes={'sha256': 'foo'},
metadata={
'name': 'lakdjf',
'provider': 'testing',
}))
assert_equal(res.status_code, 200)
assert_equal(res.json['status'], 'success')
assert_is(res.json['archive'], False)
# def test_upload_update_deleted(self):
# pass
@pytest.mark.django_db
class TestUpdateMetadataHook(HookTestCase):
def setUp(self):
super(TestUpdateMetadataHook, self).setUp()
self.path = 'greasy/pízza.png'
self.record = recursively_create_file(self.node_settings, self.path)
self.version = factories.FileVersionFactory()
self.record.versions = [self.version]
self.record.save()
self.payload = {
'metadata': {
'size': 123,
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT',
'md5': 'askjasdlk;jsadlkjsadf',
'sha256': 'sahduashduahdushaushda',
},
'version': self.version._id,
'size': 321, # Just to make sure the field is ignored
}
def send_metadata_hook(self, payload=None, target=None, **kwargs):
return self.send_hook(
'osfstorage_update_metadata',
{},
payload=payload or self.payload,
target=target or self.node,
method='put_json',
**kwargs
)
def test_callback(self):
self.version.external_modified = None
self.version.save()
self.send_metadata_hook()
self.version.reload()
#Test fields are added
assert_equal(self.version.metadata['size'], 123)
assert_equal(self.version.metadata['md5'], 'askjasdlk;jsadlkjsadf')
assert_equal(self.version.metadata['modified'], 'Mon, 16 Feb 2015 18:45:34 GMT')
#Test attributes are populated
assert_equal(self.version.size, 123)
assert_true(isinstance(self.version.external_modified, datetime.datetime))
def test_archived(self):
self.send_metadata_hook({
'version': self.version._id,
'metadata': {
'vault': 'osf_storage_prod',
'archive': 'Some really long glacier object id here'
}
})
self.version.reload()
assert_equal(self.version.metadata['vault'], 'osf_storage_prod')
assert_equal(self.version.metadata['archive'], 'Some really long glacier object id here')
def test_archived_record_not_found(self):
res = self.send_metadata_hook(
payload={
'metadata': {'archive': 'glacier'},
'version': self.version._id[::-1],
'size': 123,
'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'
},
expect_errors=True,
)
assert_equal(res.status_code, 404)
self.version.reload()
assert_not_in('archive', self.version.metadata)
@pytest.mark.django_db
class TestGetRevisions(StorageTestCase):
def setUp(self):
super(TestGetRevisions, self).setUp()
self.path = 'tie/your/mother/down.mp3'
self.record = recursively_create_file(self.node_settings, self.path)
self.record.versions = [factories.FileVersionFactory() for __ in range(15)]
self.record.save()
def get_revisions(self, fid=None, guid=None, **kwargs):
return self.app.get(
api_url_for(
'osfstorage_get_revisions',
fid=fid or self.record._id,
guid=guid or self.project._id,
**signing.sign_data(signing.default_signer, {})
),
auth=self.user.auth,
**kwargs
)
def test_get_revisions(self):
res = self.get_revisions()
expected = [
utils.serialize_revision(
self.project,
self.record,
version,
index=self.record.versions.count() - 1 - idx
)
for idx, version in enumerate(self.record.versions.all())
]
assert_equal(len(res.json['revisions']), 15)
assert_equal(res.json['revisions'], [x for x in expected])
assert_equal(res.json['revisions'][0]['index'], 15)
assert_equal(res.json['revisions'][-1]['index'], 1)
def test_get_revisions_path_not_found(self):
res = self.get_revisions(fid='missing', expect_errors=True)
assert_equal(res.status_code, 404)
@pytest.mark.django_db
class TestCreateFolder(HookTestCase):
def setUp(self):
super(TestCreateFolder, self).setUp()
self.root_node = self.node_settings.get_root()
def create_folder(self, name, parent=None, target=None, **kwargs):
parent = parent or self.node_settings.get_root()
target = target or self.project
return self.send_hook(
'osfstorage_create_child',
{'fid': parent._id, 'guid': target._id},
payload={
'name': name,
'user': self.user._id,
'kind': 'folder'
},
target=self.project,
method='post_json',
**kwargs
)
def test_create_folder(self):
resp = self.create_folder('name')
self.root_node.reload()
assert_equal(resp.status_code, 201)
assert_equal(len(self.root_node.children), 1)
assert_equal(self.root_node.children[0].serialize(), resp.json['data'])
def test_no_data(self):
resp = self.send_hook(
'osfstorage_create_child',
{'fid': self.root_node._id, 'guid': self.project._id},
payload={},
target=self.project,
method='post_json',
expect_errors=True
)
assert_equal(resp.status_code, 400)
def test_create_with_parent(self):
resp = self.create_folder('name')
assert_equal(resp.status_code, 201)
assert_equal(self.root_node.children.count(), 1)
assert_equal(self.root_node.children.all()[0].serialize(), resp.json['data'])
resp = self.create_folder('name', parent=OsfStorageFileNode.load(resp.json['data']['id']))
assert_equal(resp.status_code, 201)
assert_equal(self.root_node.children.count(), 1)
assert_false(self.root_node.children.all()[0].is_file)
assert_equal(self.root_node.children.all()[0].children.count(), 1)
assert_false(self.root_node.children.all()[0].children.all()[0].is_file)
assert_equal(self.root_node.children.all()[0].children.all()[0].serialize(), resp.json['data'])
@pytest.mark.django_db
class TestDeleteHook(HookTestCase):
def setUp(self):
super(TestDeleteHook, self).setUp()
self.root_node = self.node_settings.get_root()
def send_hook(self, view_name, view_kwargs, payload, target, method='get', **kwargs):
method = getattr(self.app, method)
return method(
'{url}?payload={payload}&signature={signature}'.format(
url=api_url_for(view_name, guid=target._id, **view_kwargs),
**signing.sign_data(signing.default_signer, payload)
),
**kwargs
)
def delete(self, file_node, **kwargs):
return self.send_hook(
'osfstorage_delete',
{'fid': file_node._id},
payload={
'user': self.user._id
},
target=self.node,
method='delete',
**kwargs
)
def test_delete(self):
file = self.root_node.append_file('Newfile')
resp = self.delete(file)
assert_equal(resp.status_code, 200)
assert_equal(resp.json, {'status': 'success'})
fid = file._id
del file
# models.StoredFileNode._clear_object_cache()
assert_is(OsfStorageFileNode.load(fid), None)
assert_true(models.TrashedFileNode.load(fid))
def test_delete_deleted(self):
file = self.root_node.append_file('Newfile')
file.delete()
resp = self.delete(file, expect_errors=True)
assert_equal(resp.status_code, 404)
def test_cannot_delete_root(self):
resp = self.delete(self.root_node, expect_errors=True)
assert_equal(resp.status_code, 400)
def test_attempt_delete_rented_file(self):
user = factories.AuthUserFactory()
file_checked = self.root_node.append_file('Newfile')
file_checked.checkout = user
file_checked.save()
res = self.delete(file_checked, expect_errors=True)
assert_equal(res.status_code, 403)
def test_attempt_delete_while_preprint(self):
file = self.root_node.append_file('Nights')
self.node.preprint_file = file
self.node.save()
res = self.delete(file, expect_errors=True)
assert_equal(res.status_code, 403)
def test_attempt_delete_folder_with_preprint(self):
folder = self.root_node.append_folder('Fishes')
file = folder.append_file('Fish')
self.node.preprint_file = file
self.node.save()
res = self.delete(folder, expect_errors=True)
assert_equal(res.status_code, 403)
def test_delete_folder_while_preprint(self):
folder = self.root_node.append_folder('Mr. Yuck')
preprint_file = self.root_node.append_file('Thyme Out')
self.node.preprint_file = preprint_file
self.node.save()
res = self.delete(folder)
assert_equal(res.status_code, 200)
def test_delete_folder_on_preprint_with_non_preprint_file_inside(self):
folder = self.root_node.append_folder('Herbal Crooners')
file = folder.append_file('Frank Cilantro')
# project having a preprint should not block other moves
preprint_file = self.root_node.append_file('Thyme Out')
self.node.preprint_file = preprint_file
self.node.save()
res = self.delete(folder)
assert_equal(res.status_code, 200)
def test_attempt_delete_folder_with_rented_file(self):
folder = self.root_node.append_folder('Hotel Events')
user = factories.AuthUserFactory()
file_checked = folder.append_file('Checkout time')
file_checked.checkout = user
file_checked.save()
res = self.delete(folder, expect_errors=True)
assert_equal(res.status_code, 403)
def test_attempt_delete_double_nested_folder_rented_file(self):
folder = self.root_node.append_folder('One is not enough')
folder_two = folder.append_folder('Two might be doe')
user = factories.AuthUserFactory()
file_checked = folder_two.append_file('We shall see')
file_checked.checkout = user
file_checked.save()
res = self.delete(folder, expect_errors=True)
assert_equal(res.status_code, 403)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestMoveHook(HookTestCase):
def setUp(self):
super(TestMoveHook, self).setUp()
self.root_node = self.node_settings.get_root()
def test_move_hook(self):
file = self.root_node.append_file('Ain\'t_got_no,_I_got_life')
folder = self.root_node.append_folder('Nina Simone')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': file._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder._id,
'target': folder.target._id,
'name': folder.name,
}
},
target=self.node,
method='post_json',)
assert_equal(res.status_code, 200)
def test_move_checkedout_file(self):
file = self.root_node.append_file('Ain\'t_got_no,_I_got_life')
file.checkout = self.user
file.save()
folder = self.root_node.append_folder('Nina Simone')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': file._id, # id of the actual file
'target': self.root_node._id, # the source FOLDER
'user': self.user._id,
'destination': {
'parent': folder._id, # the destination FOLDER
'target': folder.target._id, # The TARGET for the folder where it is going
'name': folder.name,
}
},
target=self.node,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 405)
def test_move_checkedout_file_in_folder(self):
folder = self.root_node.append_folder('From Here')
file = folder.append_file('No I don\'t wanna go')
file.checkout = self.user
file.save()
folder_two = self.root_node.append_folder('To There')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': folder._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=self.node,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 405)
def test_move_checkedout_file_two_deep_in_folder(self):
folder = self.root_node.append_folder('From Here')
folder_nested = folder.append_folder('Inbetween')
file = folder_nested.append_file('No I don\'t wanna go')
file.checkout = self.user
file.save()
folder_two = self.root_node.append_folder('To There')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': folder._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=self.node,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 405)
def test_move_preprint_file_out_of_node(self):
folder = self.root_node.append_folder('From Here')
file = folder.append_file('No I don\'t wanna go')
self.node.preprint_file = file
self.node.save()
project = ProjectFactory(creator=self.user)
project_settings = project.get_addon('osfstorage')
project_root_node = project_settings.get_root()
folder_two = project_root_node.append_folder('To There')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': file._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=project,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 403)
def test_move_file_out_of_node(self):
folder = self.root_node.append_folder('A long time ago')
file = folder.append_file('in a galaxy')
# project having a preprint should not block other moves
preprint_file = self.root_node.append_file('far')
self.node.preprint_file = preprint_file
self.node.save()
project = ProjectFactory(creator=self.user)
project_settings = project.get_addon('osfstorage')
project_root_node = project_settings.get_root()
folder_two = project_root_node.append_folder('far away')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': folder._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder_two._id,
'target': folder_two.target._id,
'name': folder_two.name,
}
},
target=project,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 200)
def test_within_node_move_while_preprint(self):
file = self.root_node.append_file('Self Control')
self.node.preprint_file = file
self.node.save()
folder = self.root_node.append_folder('Frank Ocean')
res = self.send_hook(
'osfstorage_move_hook',
{'guid': self.root_node.target._id},
payload={
'source': file._id,
'target': self.root_node._id,
'user': self.user._id,
'destination': {
'parent': folder._id,
'target': folder.target._id,
'name': folder.name,
}
},
target=self.node,
method='post_json',
expect_errors=True,
)
assert_equal(res.status_code, 200)
def test_can_move_file_out_of_quickfiles_node(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
quickfiles_file = create_test_file(quickfiles_node, self.user, filename='slippery.mp3')
quickfiles_folder = OsfStorageFolder.objects.get_root(target=quickfiles_node)
dest_folder = OsfStorageFolder.objects.get_root(target=self.project)
res = self.send_hook(
'osfstorage_move_hook',
{'guid': quickfiles_node._id},
payload={
'source': quickfiles_file._id,
'target': quickfiles_node._id,
'user': self.user._id,
'destination': {
'parent': dest_folder._id,
'target': self.project._id,
'name': dest_folder.name,
}
},
target=quickfiles_node,
method='post_json',
)
assert_equal(res.status_code, 200)
def test_can_rename_file_in_quickfiles_node(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
quickfiles_file = create_test_file(quickfiles_node, self.user, filename='road_dogg.mp3')
quickfiles_folder = OsfStorageFolder.objects.get_root(target=quickfiles_node)
dest_folder = OsfStorageFolder.objects.get_root(target=self.project)
new_name = 'JesseJames.mp3'
res = self.send_hook(
'osfstorage_move_hook',
{'guid': quickfiles_node._id},
payload={
'action': 'rename',
'source': quickfiles_file._id,
'target': quickfiles_node._id,
'user': self.user._id,
'name': quickfiles_file.name,
'destination': {
'parent': quickfiles_folder._id,
'target': quickfiles_node._id,
'name': new_name,
}
},
target=quickfiles_node,
method='post_json',
expect_errors=True,
)
quickfiles_file.reload()
assert_equal(res.status_code, 200)
assert_equal(quickfiles_file.name, new_name)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestCopyHook(HookTestCase):
@pytest.mark.enable_implicit_clean
def test_can_copy_file_out_of_quickfiles_node(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
quickfiles_folder = OsfStorageFolder.objects.get_root(target=quickfiles_node)
dest_folder = OsfStorageFolder.objects.get_root(target=self.project)
res = self.send_hook(
'osfstorage_copy_hook',
{'guid': quickfiles_node._id},
payload={
'source': quickfiles_folder._id,
'target': quickfiles_node._id,
'user': self.user._id,
'destination': {
'parent': dest_folder._id,
'target': self.project._id,
'name': dest_folder.name,
}
},
target=self.project,
method='post_json',
)
assert_equal(res.status_code, 201)
@pytest.mark.django_db
class TestFileTags(StorageTestCase):
def test_file_add_tag(self):
file = self.node_settings.get_root().append_file('Good Morning.mp3')
assert_not_in('Kanye_West', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
self.app.post_json(url, {'tag': 'Kanye_West'}, auth=self.user.auth)
file.reload()
assert_in('Kanye_West', file.tags.values_list('name', flat=True))
def test_file_add_non_ascii_tag(self):
file = self.node_settings.get_root().append_file('JapaneseCharacters.txt')
assert_not_in('コンサート', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
self.app.post_json(url, {'tag': 'コンサート'}, auth=self.user.auth)
file.reload()
assert_in('コンサート', file.tags.values_list('name', flat=True))
def test_file_remove_tag(self):
file = self.node_settings.get_root().append_file('Champion.mp3')
tag = Tag(name='Graduation')
tag.save()
file.tags.add(tag)
file.save()
assert_in('Graduation', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
self.app.delete_json(url, {'tag': 'Graduation'}, auth=self.user.auth)
file.reload()
assert_not_in('Graduation', file.tags.values_list('name', flat=True))
def test_tag_the_same_tag(self):
file = self.node_settings.get_root().append_file('Lie,Cheat,Steal.mp3')
tag = Tag(name='Run_the_Jewels')
tag.save()
file.tags.add(tag)
file.save()
assert_in('Run_the_Jewels', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
res = self.app.post_json(url, {'tag': 'Run_the_Jewels'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['status'], 'failure')
def test_remove_nonexistent_tag(self):
file = self.node_settings.get_root().append_file('WonderfulEveryday.mp3')
assert_not_in('Chance', file.tags.values_list('name', flat=True))
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
res = self.app.delete_json(url, {'tag': 'Chance'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['status'], 'failure')
def test_file_add_tag_creates_log(self):
file = self.node_settings.get_root().append_file('Yeezy Season 3.mp4')
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
res = self.app.post_json(url, {'tag': 'Kanye_West'}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node.reload()
assert_equal(self.node.logs.latest().action, 'file_tag_added')
@mock.patch('addons.osfstorage.models.OsfStorageFile.add_tag_log')
def test_file_add_tag_fail_doesnt_create_log(self, mock_log):
file = self.node_settings.get_root().append_file('UltraLightBeam.mp3')
tag = Tag(name='The Life of Pablo')
tag.save()
file.tags.add(tag)
file.save()
url = api_url_for('osfstorage_add_tag', guid=self.node._id, fid=file._id)
res = self.app.post_json(url, {'tag': 'The Life of Pablo'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
mock_log.assert_not_called()
def test_file_remove_tag_creates_log(self):
file = self.node_settings.get_root().append_file('Formation.flac')
tag = Tag(name='You that when you cause all this conversation')
tag.save()
file.tags.add(tag)
file.save()
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
res = self.app.delete_json(url, {'tag': 'You that when you cause all this conversation'}, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.node.reload()
assert_equal(self.node.logs.latest().action, 'file_tag_removed')
@mock.patch('addons.osfstorage.models.OsfStorageFile.add_tag_log')
def test_file_remove_tag_fail_doesnt_create_log(self, mock_log):
file = self.node_settings.get_root().append_file('For-once-in-my-life.mp3')
url = api_url_for('osfstorage_remove_tag', guid=self.node._id, fid=file._id)
res = self.app.delete_json(url, {'tag': 'wonder'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
mock_log.assert_not_called()
@pytest.mark.django_db
@pytest.mark.enable_bookmark_creation
class TestFileViews(StorageTestCase):
def test_file_views(self):
file = create_test_file(target=self.node, user=self.user)
url = self.node.web_url_for('addon_view_or_download_file', path=file._id, provider=file.provider)
# Test valid url file 200 on redirect
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
res = redirect.follow(auth=self.user.auth)
assert res.status_code == 200
# Test invalid node but valid deep_url redirects (moved log urls)
project_two = ProjectFactory(creator=self.user)
url = project_two.web_url_for('addon_view_or_download_file', path=file._id, provider=file.provider)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
redirect_two = redirect.follow(auth=self.user.auth)
assert redirect_two.status_code == 302
res = redirect_two.follow(auth=self.user.auth)
assert res.status_code == 200
def test_download_file(self):
file = create_test_file(target=self.node, user=self.user)
folder = self.node_settings.get_root().append_folder('Folder')
base_url = '/download/{}/'
# Test download works with path
url = base_url.format(file._id)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
# Test download works with guid
url = base_url.format(file.get_guid()._id)
redirect = self.app.get(url, auth=self.user.auth)
assert redirect.status_code == 302
# Test nonexistant file 404's
url = base_url.format('FakeGuid')
redirect = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert redirect.status_code == 404
# Test folder 400's
url = base_url.format(folder._id)
redirect = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert redirect.status_code == 400
@responses.activate
@mock.patch('framework.auth.cas.get_client')
def test_download_file_with_token(self, mock_get_client):
cas_base_url = 'http://accounts.test.test'
client = cas.CasClient(cas_base_url)
mock_get_client.return_value = client
base_url = '/download/{}/'
file = create_test_file(target=self.node, user=self.user)
responses.add(
responses.Response(
responses.GET,
'{}/oauth2/profile'.format(cas_base_url),
body=json.dumps({'id': '{}'.format(self.user._id)}),
status=200,
)
)
download_url = base_url.format(file.get_guid()._id)
token = ApiOAuth2PersonalTokenFactory(owner=self.user)
headers = {
'Authorization': str('Bearer {}'.format(token.token_id))
}
redirect = self.app.get(download_url, headers=headers)
assert mock_get_client.called
assert settings.WATERBUTLER_URL in redirect.location
assert redirect.status_code == 302
|
erinspace/osf.io
|
addons/osfstorage/tests/test_views.py
|
Python
|
apache-2.0
| 42,315
|
[
"Galaxy"
] |
6e6e97ea18526e98265ebff15aa26bed49818753ff6312f5e0a43f0a2c505ef4
|
# -*- coding: utf-8 -*-
# General Django settings for mysite project.
import os
import sys
import django.conf.global_settings as DEFAULT_SETTINGS
import logging
import mysite.pipelinefiles as pipelinefiles
import mysite.utils as utils
from celery.schedules import crontab
try:
import psycopg2
except ImportError:
# If psycopg2 is not installed, expect psycopg2cffi, which can be used with
# PyPy. Make sure psycopg2cffi runs in compatibility mode so that it can be
# imported as psycopg2.
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
raise ImportError("Need either psycopg2 or psycopg2cffi")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make Django root folder available
PROJECT_ROOT = utils.relative('..', '..')
# Add all subdirectories of project, applications and lib to sys.path
for subdirectory in ('projects', 'applications', 'lib'):
full_path = os.path.join(PROJECT_ROOT, subdirectory)
sys.path.insert(0, full_path)
# A list of people who get code error notifications. They will get an email
# if DEBUG=False and a view raises an exception.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
# At the moment CATMAID doesn't support internationalization and all strings are
# expected to be in English.
LANGUAGE_CODE = 'en-gb'
# A tuple in the same format as ADMINS of people who get broken-link
# notifications when SEND_BROKEN_LINKS_EMAILS=True.
MANAGERS = ADMINS
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# For API tokens. Disable if not using HTTPS:
'catmaid.middleware.AuthenticationHeaderExtensionMiddleware',
'catmaid.middleware.CsrfBypassTokenAuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'catmaid.middleware.AnonymousAuthenticationMiddleware',
'catmaid.middleware.AjaxExceptionMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
# Instead of 'django.contrib.admin', in order to disable the automatic
# auto-discovery, which would interfere with django-adminplus.
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
'django.contrib.gis',
'taggit',
'adminplus',
'guardian',
'catmaid',
'pgcompat',
'performancetests',
'pipeline',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'channels',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'catmaid': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'catmaid.frontend': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'celery': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
},
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ # Extra folders
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages'
],
}
},
]
# The URL where requests are redirected after login
LOGIN_REDIRECT_URL = '/'
# The URL where requests are redirected for login
LOGIN_URL = '/accounts/login'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
'guardian.backends.ObjectPermissionBackend',
# For API tokens. Disable if not using HTTPS:
'rest_framework.authentication.TokenAuthentication',
)
# If a request is authenticated through an API token, extra permissions are
# required: endpoints that require write/annotate permissions also need to
# have the TokenAnnotate permission. This is enforced even for admin accounts.
REQUIRE_EXTRA_TOKEN_PERMISSIONS = True
# Main ASGI router for CATMAID
ASGI_APPLICATION = "mysite.routing.application"
# Project ID of a dummy project that will keep all ontologies and
# classifications that are shared between multiple projects (and are
# thereby project independent).
ONTOLOGY_DUMMY_PROJECT_ID = -1
# Store datetimes as UTC by default. If stored datetimes have a timezone or
# offset, interpret it.
USE_TZ = True
# The current site in the django_site database table. This is used so that
# applications can hook into specific site(s) and a single database can manage
# content of multiple sites.
SITE_ID = 1
# Defines which type of spatial query should be used for treenodes. The
# available options are 'classic', 'postgis2d' and 'postgis3d'. Additionally,
# cache tables can be populated, which allows the use of the following node
# providers: cached_json, cached_json_text and cached_msgpack. If multiple are
# provided, node providers are asked one after the other for a result until a
# result is returned. Entries can either be node provider names or tuples of
# the form (name, options) to provide options for a particular node provider.
NODE_PROVIDERS = [
'postgis3d'
]
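# A sketch of mixing a plain provider name with a (name, options) tuple, as
# described above. The options dict content below is hypothetical and only
# meant to illustrate the (name, options) form:
# NODE_PROVIDERS = [
#     ('cached_msgpack', {'enabled': True}),
#     'postgis3d',
# ]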
# By default, prepared statements are disabled. If connection pooling is used,
# this can further improve performance.
PREPARED_STATEMENTS = False
# History tables are created and populated by default. They keep track of every
# change in all CATMAID tables plus some additional ones. If this is not
# wanted, history tables can be disabled by setting HISTORY_TRACKING to False.
# Note that the tables will still exist, they will just no longer be populated.
HISTORY_TRACKING = True
# Default user profile settings
PROFILE_INDEPENDENT_ONTOLOGY_WORKSPACE_IS_DEFAULT = False
PROFILE_SHOW_TEXT_LABEL_TOOL = False
PROFILE_SHOW_TAGGING_TOOL = False
PROFILE_SHOW_CROPPING_TOOL = False
PROFILE_SHOW_SEGMENTATION_TOOL = False
PROFILE_SHOW_TRACING_TOOL = False
PROFILE_SHOW_ONTOLOGY_TOOL = False
PROFILE_SHOW_ROI_TOOL = False
# Defines if a cropped image of a ROI should be created
# automatically when the ROI is created. If set to False
# such an image will be created when requested.
ROI_AUTO_CREATE_IMAGE = False
# A limit on the size of the result returned by a single spatial query. This
# determines the maximum number of nodes shown in the tracing overlay, so has
# severe worst-case performance implications for the database, web server, and
# client. Note that this is not a direct limit on the number of nodes in the
# result; that will be between 1x and 2x this value.
NODE_LIST_MAXIMUM_COUNT = 3500
# Default importer tile width, tile height and tile source type
IMPORTER_DEFAULT_DATA_SOURCE = 'filesystem'
IMPORTER_DEFAULT_TILE_WIDTH = 512
IMPORTER_DEFAULT_TILE_HEIGHT = 512
IMPORTER_DEFAULT_TILE_SOURCE_TYPE = 1
IMPORTER_DEFAULT_IMAGE_BASE = ''
# Some tools and widgets create files (e.g. cropping, ROIs, NeuroHDF5 and
# treenode export). These files will be created in a folder for each tool
# relative to the path defined in Django's MEDIA_ROOT variable. These are
# the default sub-folders, all of them need to be writable:
MEDIA_HDF5_SUBDIRECTORY = 'hdf5'
MEDIA_CROPPING_SUBDIRECTORY = 'cropping'
MEDIA_ROI_SUBDIRECTORY = 'roi'
MEDIA_TREENODE_SUBDIRECTORY = 'treenode_archives'
MEDIA_EXPORT_SUBDIRECTORY = 'export'
MEDIA_CACHE_SUBDIRECTORY = 'cache'
# Cropping output extension
CROPPING_OUTPUT_FILE_EXTENSION = "tiff"
CROPPING_OUTPUT_FILE_PREFIX = "crop_"
CROPPING_VERIFY_CERTIFICATES = True
# The maximum allowed size in Bytes for generated files. The cropping tool, for
# instance, uses this to cancel a request if the generated file grows larger
# than this. This defaults to 50 Megabyte.
GENERATED_FILES_MAXIMUM_SIZE = 52428800
# The maximum allowed size in bytes for files uploaded for import as skeletons.
# The default is 5 megabytes.
IMPORTED_SKELETON_FILE_MAXIMUM_SIZE = 5242880
# The maximum allowed image size for imported images. The default is 3MB.
IMPORTED_IMAGE_FILE_MAXIMUM_SIZE = 3145728
# The maximum allowed body data size, default is 10 MB.
DATA_UPLOAD_MAX_MEMORY_SIZE = 10 * 8 * 1024**2
# Specifies if user registration is allowed
USER_REGISTRATION_ALLOWED = False
# A new user's default groups
NEW_USER_DEFAULT_GROUPS = []
# While pickle can cause security problems [1], we allow it for now and trust
# that the Celery server will only accept connections from CATMAID. To improve
# security, this should be changed though, see also [2].
# [1] http://docs.celeryproject.org/en/latest/userguide/security.html#serializers
# [2] https://github.com/catmaid/CATMAID/issues/630
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_TASK_SERIALIZER = 'pickle'
# The default set of periodic tasks
CELERY_BEAT_SCHEDULE = {
# Clean cropped stack directory every night at 23:30.
'daily-crop-data-cleanup': {
'task': 'catmaid.tasks.cleanup_cropped_stacks',
'schedule': crontab(hour=23, minute=30)
},
# Update project statistics every night at 23:45.
'daily-project-stats-summary-update': {
'task': 'catmaid.tasks.update_project_statistics_from_scratch',
'schedule': crontab(hour=23, minute=45)
},
'daily-inactive-user-update': {
'task': 'catmaid.tasks.deactivate_inactive_users',
'schedule': crontab(hour=00, minute=00)
},
}
# We use django-pipeline to compress and reference JavaScript and CSS files. To
# make Pipeline integrate with staticfiles (and therefore collectstatic calls)
# the STATICFILES_STORAGE variable has to be set to:
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Adding PipelineFinder as an asset discovery mechanism allows staticfiles to also
# discover files that were generated by Pipeline.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
PIPELINE = {
# Use CSSMin as django-pipeline's CSS compressor
'CSS_COMPRESSOR': 'pipeline.compressors.cssmin.CSSMinCompressor',
    # Use no JS compressor for now
'JS_COMPRESSOR': None,
# Don't wrap JS files into anonymous functions. Our code isn't ready for
# this, yet.
'DISABLE_WRAPPER': True,
# All static files that are run through pipeline
'STYLESHEETS': pipelinefiles.STYLESHEETS,
'JAVASCRIPT': pipelinefiles.JAVASCRIPT
}
# Make a list of files that should be included directly (bypassing pipeline)
# and a list of pipeline identifiers for all others.
NON_COMPRESSED_FILE_IDS = list(pipelinefiles.non_pipeline_js)
NON_COMPRESSED_FILES = list(pipelinefiles.non_pipeline_js.values())
COPY_ONLY_FILE_IDS = set(pipelinefiles.copy_only_files)
STYLESHEET_IDS = list(pipelinefiles.STYLESHEETS)
COMPRESSED_FILE_IDS = [key for key in pipelinefiles.JAVASCRIPT \
if key not in NON_COMPRESSED_FILE_IDS \
and key not in COPY_ONLY_FILE_IDS]
INSTALLED_EXTENSIONS = tuple(pipelinefiles.installed_extensions)
# Make Git based version of CATMAID available as a settings field
VERSION = utils.get_version()
# Janelia rendering service. To activate add the following lines to your
# settings.py file:
# MIDDLEWARE += ('catmaid.middleware.JaneliaRenderMiddleware',)
# JANELIA_RENDER_SERVICE_URL = 'http://renderer.int.janelia.org:8080/render-ws/v1'
# JANELIA_RENDER_DEFAULT_STACK_RESOLUTION = (4,4,35)
# JANELIA_RENDER_STACK_TILE_WIDTH = 1024
# JANELIA_RENDER_STACK_TILE_HEIGHT = 1024
# DVID auto-discovery. To activate add the following lines to your settings.py
# file:
# MIDDLEWARE += ('catmaid.middleware.DVIDMiddleware',)
# DVID_URL = 'http://emdata2.int.janelia.org:7000'
# DVID_FORMAT = 'jpg:80'
# DVID_SHOW_NONDISPLAYABLE_REPOS = True
# In order to make Django work with the unmanaged models from djsopnet in tests,
# we use a custom testing runner to detect when running in a testing
# environment. The custom PostgreSQL database wrapper uses this flag to change
# its behavior.
TEST_RUNNER = 'custom_testrunner.TestSuiteRunner'
# By default, front end tests are disabled.
FRONT_END_TESTS_ENABLED = False
# By default GUI tests are disabled. Enable them by setting GUI_TESTS_ENABLED to
# True (done during CI).
GUI_TESTS_ENABLED = False
GUI_TESTS_REMOTE = False
# To simplify configuration for performance test CATMAID instances, the SCM URL
# used to create commit links is defined here. The {version} placeholder is
# used to denote the commit name.
PERFORMANCETEST_SCM_URL = "https://github.com/catmaid/CATMAID/commit/{version}"
# This setting allows the WSGI back-end to serve static files. Using this in
# production is highly discouraged, as it is very inefficient and potentially
# insecure. It is used only to simplify continuous integration.
SERVE_STATIC = False
# Additional static files can be loaded by CATMAID if they are placed in the
# folder defined by STATIC_EXTENSION_ROOT. These files are not respected by
# Pipeline to allow updating them without running collectstatic. To use this
# feature, your webserver has to resolve the STATIC_EXTENSION_URL to this
# folder.
STATIC_EXTENSION_URL = "/staticext/"
STATIC_EXTENSION_ROOT = "/tmp"
STATIC_EXTENSION_FILES = []
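# A hypothetical example of the three settings above (the folder and file names
# are made up for illustration): serve extra files from a dedicated folder
# through the /staticext/ URL prefix:
# STATIC_EXTENSION_URL = "/staticext/"
# STATIC_EXTENSION_ROOT = "/srv/catmaid/staticext"
# STATIC_EXTENSION_FILES = ["myextension/extra.js"]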
# Default cookie suffix, should be customized if multiple CATMAID instances run
# on the same server, e.g. with:
# hashlib.md5(CATMAID_URL.encode('utf-8')).hexdigest()
COOKIE_SUFFIX = 'catmaid'
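# A runnable version of the suggestion above (assumes CATMAID_URL is defined
# elsewhere in this settings module):
# import hashlib
# COOKIE_SUFFIX = hashlib.md5(CATMAID_URL.encode('utf-8')).hexdigest()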
# The CATMAID web client sends lists by sending each list element in its own
# field. Django allows by default 1000 fields. To allow large neuron lists, we
# need to disable this check for now.
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
# If no authentication is possible, use guardian's anonymous user
'UNAUTHENTICATED_USER': 'guardian.utils.get_anonymous_user',
'VIEW_DESCRIPTION_FUNCTION': 'custom_rest_swagger_googledoc.get_googledocstring',
# Parser classes priority-wise for Swagger
'DEFAULT_PARSER_CLASSES': [
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
'rest_framework.parsers.JSONParser',
],
'DEFAULT_SCHEMA_CLASS': 'custom_swagger_schema.CustomSchema',
'URL_FORMAT_OVERRIDE': None,
}
SWAGGER_SETTINGS = {
'DOC_EXPANSION': 'list',
'APIS_SORTER': 'alpha'
}
# Needed for NRRD export
CATMAID_FULL_URL = ""
CATMAID_HTTP_AUTH_USER = None
CATMAID_HTTP_AUTH_PASS = None
# Whether or not to create default data views in the initial migration. This is
# mainly useful for setups using the JaneliaRender or DVID middleware.
CREATE_DEFAULT_DATAVIEWS = True
# NBLAST support
NBLAST_ALL_BY_ALL_MIN_SIZE = 10
MAX_PARALLEL_ASYNC_WORKERS = 1
# Intersection grid settings, dimensions in project coordinates (nm)
DEFAULT_CACHE_GRID_CELL_WIDTH = 25000
DEFAULT_CACHE_GRID_CELL_HEIGHT = 25000
DEFAULT_CACHE_GRID_CELL_DEPTH = 40
# Whether Postgres should emit "catmaid.spatial-update" events on changes of
# spatial data (e.g. inserts, updates and deletions of treenodes, connectors and
# connector links).
SPATIAL_UPDATE_NOTIFICATIONS = False
# On startup, the default client instance settings can be populated based on a
# JSON string, representing a list of objects with a "key" field and a "value"
# field. These settings will only be applied if they don't exist already.
INSTANCE_CLIENT_SETTINGS = None
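# Example of such a JSON string (the setting key and value are hypothetical,
# shown only to illustrate the expected structure):
# INSTANCE_CLIENT_SETTINGS = '[{"key": "example-setting", "value": true}]'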
# Whether or not the instance client settings set up in INSTANCE_CLIENT_SETTINGS
# should be reset every time CATMAID starts. Otherwise, they will only be
# applied if they don't exist already.
FORCE_CLIENT_SETTINGS = False
|
tomka/CATMAID
|
django/projects/mysite/settings_base.py
|
Python
|
gpl-3.0
| 16,810
|
[
"NEURON"
] |
797da6467c35af71520e7d5061c41a33b176cab41003c74e05550287fc6153c2
|
# 130. Surrounded Regions
# Total Accepted: 66921
# Total Submissions: 390322
# Difficulty: Medium
# Contributors: Admin
# Given a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.
#
# A region is captured by flipping all 'O's into 'X's in that surrounded region.
#
# For example,
# X X X X
# X O O X
# X X O X
# X O X X
# After running your function, the board should be:
#
# X X X X
# X X X X
# X X X X
# X O X X
# 2018.04.08
from collections import deque
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
if not board or not board[0]: return
m, n = len(board), len(board[0])
vis = set()
d = deque()
for i in xrange(m):
for j in xrange(n):
if (i == 0 or j == 0 or i == m - 1 or j == n - 1) and board[i][j] == "O":
d.appendleft((i, j))
while d:
i, j = d.pop()
if (i, j) in vis: continue
vis.add((i, j))
for pair in [[0, -1], [-1, 0], [0, 1], [1, 0]]:
ii, jj = i + pair[0], j + pair[1]
if ii >= 0 and ii < m and jj >= 0 and jj < n and board[ii][jj] == "O" and (ii, jj) not in vis:
d.appendleft((ii, jj))
for i in xrange(m):
for j in xrange(n):
if (i, j) not in vis and board[i][j] == "O":
board[i][j] = 'X'
return
# BFS Algorithms:
# 1. Scan the boundaries and appendleft border 'O' cells to the deque
# 2. Pop, check adjacent cells and push unvisited 'O' neighbours to the deque
# 3. Rescan the board and flip 'O' cells that are not in vis
# 2017.02.22 BFS
from collections import deque
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
if not board: return
m, n = len(board), len(board[0])
if m <= 2 or n <= 2: return
d = deque()
visited = [ [ False for j in xrange(n) ] for i in xrange(m) ]
for j in xrange(n):
if board[0][j] == 'O' and not visited[0][j]:
d.appendleft([0, j])
if board[m-1][j] == 'O' and not visited[m-1][j]:
d.appendleft([m - 1, j])
for i in xrange(1, m-1):
if board[i][0] == 'O' and not visited[i][0]:
d.appendleft([i, 0])
if board[i][n-1] == 'O' and not visited[i][n-1]:
d.appendleft([i, n - 1])
while d:
i, j = d.pop()
visited[i][j] = True
if i + 1 < m and board[i+1][j] == 'O' and not visited[i+1][j]:
d.appendleft([i + 1, j])
if j + 1 < n and board[i][j+1] == 'O' and not visited[i][j+1]:
d.appendleft([i, j + 1])
if i - 1 >= 0 and board[i-1][j] == 'O' and not visited[i-1][j]:
d.appendleft([i-1, j])
if j - 1 >= 0 and board[i][j-1] == 'O' and not visited[i][j-1]:
d.appendleft([i, j-1])
for i in xrange(1, m - 1):
for j in xrange(1, n - 1):
if board[i][j] == 'O' and not visited[i][j]:
board[i][j] = 'X'
return
# 12.06.2016 Rewrite. DFS cannot pass the new test case. Need to revisit the BFS approach.
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
if not board: return
m, n = len(board), len(board[0])
if m <= 2 or n <= 2: return
starts = []
visited = [[ False for j in xrange(n)] for i in xrange(m)]
for j in xrange(n):
if board[0][j] == 'O':
visited[0][j] = True
starts.append([0, j])
if board[m-1][j] == 'O':
visited[m-1][j] = True
starts.append([m-1, j])
for i in xrange(1, m-1):
if board[i][0] == 'O':
visited[i][0] = True
starts.append([i, 0])
if board[i][n-1] == 'O':
visited[i][n-1] = True
starts.append([i, n-1])
for i, j in starts:
self.dfs(board, visited, i, j, m, n)
for i in xrange(1, m-1):
for j in xrange(1, n-1):
if board[i][j] == 'O' and not visited[i][j]:
board[i][j] = 'X'
return
def dfs(self, board, visited, i, j, m, n):
neighbors = []
for x, y in [[i+1, j], [i-1, j], [i, j+1], [i, j-1]]:
if (x >= 0 and y >= 0 and x < m and y < n and not visited[x][y] and board[x][y] == 'O'):
neighbors.append([x, y])
for x, y in neighbors:
visited[x][y] = True
self.dfs(board, visited, x, y, m, n)
# Notes:
# 'O' not 0
# OJ input is list
# BFS deque solution
from collections import deque
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
# 1. Record all boundaries in deque.
if not board: return # Corner case
d = deque()
m, n = len(board), len(board[0])
if m <= 2 or n <= 2: # Corner case
return
isVis = [ [False for j in xrange(n)] for i in xrange(m)]
for j in xrange(n):
if board[0][j] == 'O': d.appendleft([0, j])
if board[m-1][j] == 'O': d.appendleft([m-1, j])
for i in xrange(1, m-1):
if board[i][0] == 'O': d.appendleft([i, 0])
if board[i][n-1] == 'O': d.appendleft([i, n-1])
        # 2. For each item popped from the deque, visit adjacent cells and mark them in isVis[][]
while d:
print(d)
i, j = d.pop()
if isVis[i][j]:
continue
if i + 1 < m and board[i+1][j] == 'O' and not isVis[i+1][j]:
d.appendleft([i+1, j])
if i - 1 >= 0 and board[i-1][j] == 'O' and not isVis[i-1][j]:
d.appendleft([i-1, j])
if j + 1 < n and board[i][j+1] == 'O' and not isVis[i][j+1]:
d.appendleft([i, j+1])
if j - 1 >= 0 and board[i][j-1] == 'O' and not isVis[i][j-1]:
d.appendleft([i, j-1])
isVis[i][j] = True
        # 3. Scan the inside of the board and flip 'O' cells that were not visited.
for j in xrange(1, n-1):
for i in xrange(1, m-1):
if board[i][j] == 'O' and not isVis[i][j]:
board[i] = board[i][:j] + 'X' + board[i][j+1:]
#board[i][j] = 'X' #If each line is list
return
if __name__ == "__main__":
board = ["OXO","XOX","OXO"]
# board = ["XXXX","XOOX","XXOX","XOXX"]
board = ["OXOOOX","OOXXXO","XXXXXO","OOOOXX","XXOOXO","OOXXXX"]
for row in board:
print([ row])
Solution().solve(board)
for row in board:
print([ row])
### DFS maximum recursion depth exceeded
# See if 'O' is attached to the edge.
# class Solution2(object):
# def solve(self, board):
# """
# :type board: List[List[str]]
# :rtype: void Do not return anything, modify board in-place instead.
# """
# if not board:
# return
#
# m, n = len(board), len(board[0]) #Special cases, if m or n <= 2. return all XXX
# candidates = [] #i, j
# isVisited = [[ False for j in xrange(n)] for i in xrange(m)]
#
# # Scan all sides and get candidates
# for j in xrange(n):
# if board[0][j] == 'O':
# candidates.append([0, j])
# if m == 1:
# break
# if board[m-1][j] == 'O':
# candidates.append([m-1, j])
#
# for i in xrange(m):
# if board[i][0] == 'O':
# candidates.append([i, 0])
# if n == 1:
# break
# if board[i][n-1] == 'O':
# candidates.append([i, n-1])
#
# print("candidates: ")
# print(candidates)
# # DFS candidates to get noFLip[]
# noFlip = []
# for i, j in candidates:
# if isVisited[i][j]:
# continue
#
# self.dfs(i, j, isVisited, candidates, board, m, n)
#
# # Rescan to flip
# for i in xrange(1, m-1):
# for j in xrange(1, n-1):
# if board[i][j] == 'O' and [i, j] not in noFlip:
# board[i][j] == 'X'
#
# return
#
#
# def dfs(self, i, j, isVisited, noFlip, board, m, n):
# if isVisited[i][j]:
# return
#
# noFlip.append([i, j])
# isVisited[i][j] = True
#
# if i+1 < m and not isVisited[i+1][j]:
# self.dfs(i+1, j, isVisited, noFlip, board, m, n)
# if i-1 >= 0 and not isVisited[i-1][j]:
# self.dfs(i-1, j, isVisited, noFlip, board, m, n)
# if j+1 < n and not isVisited[i][j+1]:
# self.dfs(i, j+1, isVisited, noFlip, board, m, n)
# if j-1 >= 0 and not isVisited[i][j-1]:
# self.dfs(i, j-1, isVisited, noFlip, board, m, n)
#
|
shawncaojob/LC
|
PY/130_surrounded_regions.py
|
Python
|
gpl-3.0
| 9,719
|
[
"VisIt"
] |
a44de840ff926e0009962ff08928e14722286862a290824059a1db24b7cfb0c1
|
## ENVISIoN
##
## Copyright (c) 2021 Gabriel Anderberg, Didrik Axén, Adam Engman,
## Kristoffer Gubberud Maras, Joakim Stenborg
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## ##############################################################################################
# Preparations for testing
import os, sys, h5py
import pytest
# path to current directory
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(TEST_DIR, os.pardir))
import envisionpy.hdf5parser
########################################################################################
# Test of a VASP directory which is compatible with the molecular dynamics parser.
# Path to the vasp directory
PATH_TO_VASP_CALC = os.path.join(TEST_DIR, "resources/MD/VASP/Al_300K")
# Path to the resulting hdf5 file
PATH_TO_HDF5 = os.path.join(TEST_DIR, "md_demo.hdf5")
def test_parse_md():
"""Testing if correct MD parsing of a VASP-directory.
Parameters
----------
None
Returns
-------
None
"""
# Parse
envisionpy.hdf5parser.mol_dynamic_parser(PATH_TO_HDF5, PATH_TO_VASP_CALC)
# Test if the generated HDF5-file contains correct information
if os.path.isfile(PATH_TO_HDF5):
with h5py.File(PATH_TO_HDF5, 'r') as h5:
assert '/MD' in h5
# cleanup
os.remove(PATH_TO_HDF5)
test_parse_md()
|
rartino/ENVISIoN
|
unit_testing/test_moldyn_parsing.py
|
Python
|
bsd-2-clause
| 2,654
|
[
"VASP"
] |
f110d7ec52c3f18ecc93ae7b458431147adf63730f1bc8c1dec8f175dcbae7a6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes to represent the phonon density of states, etc.
"""
import numpy as np
import scipy.constants as const
from monty.functools import lazy_property
from monty.json import MSONable
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_linear_interpolated_value
BOLTZ_THZ_PER_K = const.value("Boltzmann constant in Hz/K") / const.tera # Boltzmann constant in THz/K
THZ_TO_J = const.value("hertz-joule relationship") * const.tera
def coth(x):
"""
Coth function.
Args:
x (): value
Returns:
coth(x)
"""
return 1.0 / np.tanh(x)
class PhononDos(MSONable):
"""
Basic DOS object. All other DOS objects are extended versions of this
object.
"""
def __init__(self, frequencies, densities):
"""
Args:
frequencies: A sequences of frequencies in THz
densities: A list representing the density of states.
"""
self.frequencies = np.array(frequencies)
self.densities = np.array(densities)
def get_smeared_densities(self, sigma):
"""
Returns the densities, but with a Gaussian smearing of
std dev sigma applied.
Args:
sigma: Std dev of Gaussian smearing function.
Returns:
Gaussian-smeared densities.
"""
from scipy.ndimage.filters import gaussian_filter1d
diff = [self.frequencies[i + 1] - self.frequencies[i] for i in range(len(self.frequencies) - 1)]
avgdiff = sum(diff) / len(diff)
smeared_dens = gaussian_filter1d(self.densities, sigma / avgdiff)
return smeared_dens
def __add__(self, other):
"""
Adds two DOS together. Checks that frequency scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
if not all(np.equal(self.frequencies, other.frequencies)):
raise ValueError("Frequencies of both DOS are not compatible!")
densities = self.densities + other.densities
return PhononDos(self.frequencies, densities)
def __radd__(self, other):
"""
Reflected addition of two DOS objects
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs.
"""
return self.__add__(other)
def get_interpolated_value(self, frequency):
"""
Returns interpolated density for a particular frequency.
Args:
frequency: frequency to return the density for.
"""
return get_linear_interpolated_value(self.frequencies, self.densities, frequency)
def __str__(self):
"""
Returns a string which can be easily plotted (using gnuplot).
"""
stringarray = ["#{:30s} {:30s}".format("Frequency", "Density")]
for i, frequency in enumerate(self.frequencies):
stringarray.append("{:.5f} {:.5f}".format(frequency, self.densities[i]))
return "\n".join(stringarray)
@classmethod
def from_dict(cls, d):
"""
Returns PhononDos object from dict representation of PhononDos.
"""
return cls(d["frequencies"], d["densities"])
def as_dict(self):
"""
Json-serializable dict representation of PhononDos.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"frequencies": list(self.frequencies),
"densities": list(self.densities),
}
@lazy_property
def ind_zero_freq(self):
"""
        Index of the first point for which the frequencies are equal to or greater than zero.
"""
ind = np.searchsorted(self.frequencies, 0)
if ind >= len(self.frequencies):
raise ValueError("No positive frequencies found")
return ind
@lazy_property
def _positive_frequencies(self):
"""
Numpy array containing the list of positive frequencies
"""
return self.frequencies[self.ind_zero_freq :]
@lazy_property
def _positive_densities(self):
"""
Numpy array containing the list of densities corresponding to positive frequencies
"""
return self.densities[self.ind_zero_freq :]
def cv(self, t, structure=None):
"""
Constant volume specific heat C_v at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/(K*mol)
Args:
t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
Constant volume specific heat C_v
"""
if t == 0:
return 0
freqs = self._positive_frequencies
dens = self._positive_densities
def csch2(x):
return 1.0 / (np.sinh(x) ** 2)
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
cv = np.trapz(wd2kt ** 2 * csch2(wd2kt) * dens, x=freqs)
cv *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
cv /= formula_units
return cv
def entropy(self, t, structure=None):
"""
Vibrational entropy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/(K*mol)
Args:
t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
Vibrational entropy
"""
if t == 0:
return 0
freqs = self._positive_frequencies
dens = self._positive_densities
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
s = np.trapz((wd2kt * coth(wd2kt) - np.log(2 * np.sinh(wd2kt))) * dens, x=freqs)
s *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
s /= formula_units
return s
def internal_energy(self, t, structure=None):
"""
Phonon contribution to the internal energy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/mol
Args:
t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
Phonon contribution to the internal energy
"""
if t == 0:
return self.zero_point_energy(structure=structure)
freqs = self._positive_frequencies
dens = self._positive_densities
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
e = np.trapz(freqs * coth(wd2kt) * dens, x=freqs) / 2
e *= THZ_TO_J * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
e /= formula_units
return e
def helmholtz_free_energy(self, t, structure=None):
"""
Phonon contribution to the Helmholtz free energy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/mol
Args:
t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
Phonon contribution to the Helmholtz free energy
"""
if t == 0:
return self.zero_point_energy(structure=structure)
freqs = self._positive_frequencies
dens = self._positive_densities
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
f = np.trapz(np.log(2 * np.sinh(wd2kt)) * dens, x=freqs)
f *= const.Boltzmann * const.Avogadro * t
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
f /= formula_units
return f
def zero_point_energy(self, structure=None):
"""
        Zero point energy of the system. Only positive frequencies will be used.
Result in J/mol-c. A mol-c is the abbreviation of a mole-cell, that is, the number
of Avogadro times the atoms in a unit cell. To compare with experimental data the result
should be divided by the number of unit formulas in the cell. If the structure is provided
the division is performed internally and the result is in J/mol
        Args:
            structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
            Zero point energy of the system
"""
freqs = self._positive_frequencies
dens = self._positive_densities
zpe = 0.5 * np.trapz(freqs * dens, x=freqs)
zpe *= THZ_TO_J * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
zpe /= formula_units
return zpe
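# A minimal usage sketch of the thermodynamic helpers above (the small DOS below
# is made up for illustration only). For example, cv() integrates
# (nu / (2*k_B*T))**2 * csch(nu / (2*k_B*T))**2 * g(nu) over the positive
# frequencies (nu in THz, with the Boltzmann constant expressed in THz/K as
# BOLTZ_THZ_PER_K) and scales the result by k_B * N_A:
#
#     dos = PhononDos([1.0, 2.0, 4.0, 6.0], [0.0, 1.0, 1.0, 0.0])
#     cv_300 = dos.cv(300)                    # J/(K*mol-c)
#     s_300 = dos.entropy(300)                # J/(K*mol-c)
#     f_300 = dos.helmholtz_free_energy(300)  # J/mol-c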
class CompletePhononDos(PhononDos):
"""
This wrapper class defines a total dos, and also provides a list of PDos.
.. attribute:: pdos
Dict of partial densities of the form {Site:Densities}
"""
def __init__(self, structure, total_dos, pdoss):
"""
Args:
structure: Structure associated with this particular DOS.
total_dos: total Dos for structure
pdoss: The pdoss are supplied as an {Site: Densities}
"""
super().__init__(frequencies=total_dos.frequencies, densities=total_dos.densities)
self.pdos = {s: np.array(d) for s, d in pdoss.items()}
self.structure = structure
def get_site_dos(self, site):
"""
Get the Dos for a site.
Args:
site: Site in Structure associated with CompletePhononDos.
Returns:
PhononDos containing summed orbital densities for site.
"""
return PhononDos(self.frequencies, self.pdos[site])
def get_element_dos(self):
"""
Get element projected Dos.
Returns:
dict of {Element: Dos}
"""
el_dos = {}
for site, atom_dos in self.pdos.items():
el = site.specie
if el not in el_dos:
el_dos[el] = np.array(atom_dos)
else:
el_dos[el] += np.array(atom_dos)
return {el: PhononDos(self.frequencies, densities) for el, densities in el_dos.items()}
@classmethod
def from_dict(cls, d):
"""
Returns CompleteDos object from dict representation.
"""
tdos = PhononDos.from_dict(d)
struct = Structure.from_dict(d["structure"])
pdoss = {}
for at, pdos in zip(struct, d["pdos"]):
pdoss[at] = pdos
return cls(struct, tdos, pdoss)
def as_dict(self):
"""
Json-serializable dict representation of CompletePhononDos.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"frequencies": list(self.frequencies),
"densities": list(self.densities),
"pdos": [],
}
if len(self.pdos) > 0:
for at in self.structure:
d["pdos"].append(list(self.pdos[at]))
return d
def __str__(self):
return "Complete phonon DOS for " + str(self.structure)
|
richardtran415/pymatgen
|
pymatgen/phonon/dos.py
|
Python
|
mit
| 13,551
|
[
"Avogadro",
"Gaussian",
"pymatgen"
] |
e38dd8255352084ee95170a6cfdde517c91c0d8fe1d6559cd00defbdd141377d
|
"""
A myopic global optimizer class.
Author:
Ilias Bilionis
Date:
5/1/2015
"""
__all__ = ['GlobalOptimizer']
import numpy as np
from collections import Iterable
import math
import sys
import warnings
warnings.simplefilter('ignore')
import GPy
import design
import os
from . import colorAlpha_to_rgb
class GlobalOptimizer(object):
"""
A global optimizer class.
It is essentially a myopic, sequential, global optimizer.
:param func: The function you wish to optimize.
    :param args: Additional arguments for the function we optimize.
:param kernel: An initialized kernel that you might want to use. If ``None``,
then we will use an RBF with standard parameters.
:param gp_type: The GPy model class you would like to use for the regression.
"""
# The initial design
_X_init = None
# The initial observations
_Y_init = None
# The total design we have available
_X_design = None
# The indexes of the observations we have made so far (list of integers)
_idx_X_obs = None
# The objectives we have observed so far (list of whatever the observations are)
_Y_obs = None
# The function we wish to optimize
_func = None
# Extra arguments to func
_args = None
# The kernel object to be used for the GP
_kernel = None
# The kernel type used for the GP by default
_kernel_type = None
# The GPy regression model that we are using
_gp_type = None
# The trained GP model representing the objective
_model = None
# The prior we use for the variance of the likelihood
_model_like_variance_prior = None
# The expected improvement of the current model
_ei = None
# The denoised posterior GP samples
_denoised_posterior_samples = None
# The indices of the best designs of each GP sample
_best_idx = None
@property
def X_init(self):
"""
:getter: Get the initial design.
"""
return self._X_init
@X_init.setter
def X_init(self, value):
"""
:setter: Set the initial design.
"""
assert isinstance(value, Iterable)
self._X_init = value
@property
def Y_init(self):
"""
:getter: Get the initial observations.
"""
return self._Y_init
@Y_init.setter
def Y_init(self, value):
"""
:setter: Set the initial observations.
"""
if value is not None:
assert isinstance(value, Iterable)
value = np.array(value)
self._Y_init = value
@property
def X_design(self):
"""
:getter: Get the design.
"""
return self._X_design
@X_design.setter
def X_design(self, value):
"""
:setter: Set the design.
"""
assert isinstance(value, Iterable)
self._X_design = value
@property
def idx_X_obs(self):
"""
:getter: The indexes of currently observed design points.
"""
return self._idx_X_obs
@property
def Y_obs(self):
"""
:getter: The values of the currently observed design points.
"""
return self._Y_obs
@property
def func(self):
"""
:getter: Get the function we are optimizing.
"""
return self._func
@func.setter
def func(self, value):
"""
:setter: Set the function we are optimizing.
"""
assert hasattr(value, '__call__')
self._func = value
@property
def args(self):
"""
:getter: The extra arguments of func.
"""
return self._args
@property
def X(self):
"""
:getter: Get all the currently observed points.
"""
return np.vstack([self.X_init, self.X_design[self.idx_X_obs]])
@property
def Y(self):
"""
:getter: Get all the currently observed objectives.
"""
if len(self.Y_obs) == 0:
return np.array(self.Y_init)
return np.hstack([self.Y_init, self.Y_obs])
@property
def num_dim(self):
"""
:getter: The number of design dimensions.
"""
return self.X_init.shape[1]
@property
def kernel(self):
"""
:getter: The kernel to be used for the GP.
"""
if self._kernel is None:
self._kernel = self._get_fresh_kernel()
return self._kernel.copy()
@property
def kernel_type(self):
"""
:getter: The kernel type used for the GP by default.
"""
return self._kernel_type
@property
def gp_type(self):
"""
:getter: The GPy model to be used for the regression.
"""
return self._gp_type
@property
def model(self):
"""
:getter: Get the GP model of the objective.
"""
if self._model is None:
self._model = self._get_fresh_model()
return self._model
@property
def model_like_variance_prior(self):
"""
:getter: Get the prior of the variance of the likelihood that we
are using.
"""
return self._model_like_variance_prior
@property
def ei(self):
"""
:getter: The expected improvement on the design points.
"""
if self._ei is None:
ei_all = self.model.pymc_mcmc.trace('denoised_ei_min')[:]
ei = ei_all.mean(axis=0)
self._ei = ei
return self._ei
def __init__(self, X_init, X_design, func, args=(), Y_init=None,
kernel=None,
kernel_type=GPy.kern.RBF,
gp_type=GPy.models.GPRegression,
model_like_variance_prior=GPy.priors.Jeffreys(),
kern_variance_prior=GPy.priors.Jeffreys(),
kern_lengthscale_prior=GPy.priors.Jeffreys(),
optimize_model_before_init_mcmc=True,
optimize_model_before_mcmc=False,
optimize_model_num_restarts=1,
num_predict=100,
fixed_noise=False,
max_it=100,
add_at_least=10,
rel_tol=1e-2,
num_mcmc_samples=1000,
num_mcmc_burn=100,
num_mcmc_thin=10,
mcmc_tune_throughout=False,
mcmc_progress_bar=True,
mcmc_start_from_scratch=False,
verbose=True,
make_plots=True,
plot_ext='png',
plot_prefix='optimizer',
true_func=None,
new_fig_func=None,
renew_design=False):
"""
Initialize the object.
"""
self.X_init = X_init
self.X_design = X_design
self.Y_init = Y_init
self.func = func
self._args = args
self._kernel = kernel
self._kernel_type = kernel_type
self._gp_type = gp_type
self._model_like_variance_prior = model_like_variance_prior
self.kern_variance_prior = kern_variance_prior
self.kern_lengthscale_prior = kern_lengthscale_prior
self._idx_X_obs = []
self._Y_obs = []
        self.num_predict = num_predict
self.optimize_model_before_init_mcmc = optimize_model_before_init_mcmc
self.optimize_model_before_mcmc = optimize_model_before_mcmc
self.optimize_model_num_restarts = optimize_model_num_restarts
self.fixed_noise = fixed_noise
self.max_it = max_it
self.add_at_least = add_at_least
self.rel_tol = rel_tol
self.num_mcmc_samples = num_mcmc_samples
self.num_mcmc_burn = num_mcmc_burn
self.num_mcmc_thin = num_mcmc_thin
self.mcmc_tune_throughout = mcmc_tune_throughout
self.mcmc_progress_bar = mcmc_progress_bar
self.mcmc_start_from_scratch = mcmc_start_from_scratch
        self.verbose = verbose
self.make_plots = make_plots
self.plot_prefix = plot_prefix
self.plot_ext = plot_ext
self.true_func = true_func
if self.true_func is not None:
# Assuming this is a test and that true_func is very cheap
self.Y_true = np.array([self.true_func(x) for x in self.X_design])[:, None]
i_best = np.argmin(self.Y_true)
self.X_true_best = self.X_design[i_best, :]
self.Y_true_best = self.Y_true[i_best, 0]
if new_fig_func is None:
def new_fig():
import matplotlib.pyplot as plt
return plt.subplots()
new_fig_func = new_fig
self.new_fig_func = new_fig_func
self.renew_design = renew_design
self.initialize()
def _get_fresh_kernel(self):
"""
:getter: Get a kernel that is a fresh copy of the kernel the user
provided us with.
"""
kernel = self.kernel_type(self.num_dim, ARD=True)
kernel.variance.unconstrain()
kernel.variance.set_prior(self.kern_variance_prior)
kernel.lengthscale.unconstrain()
kernel.lengthscale.set_prior(self.kern_lengthscale_prior)
return kernel
def _get_fresh_model(self):
"""
:getter: Get a fresh Gaussian process model.
"""
model = self.gp_type(self.X, self.Y[:, None], self.kernel)
model.likelihood.variance.unconstrain()
model._X_predict = self.X_design
if self.fixed_noise:
model.likelihood.variance.unconstrain()
model.Gaussian_noise.variance.constrain_fixed(self.fixed_noise)
else:
model.likelihood.variance.set_prior(self.model_like_variance_prior)
model._num_predict = self.num_predict
model.pymc_trace_denoised_min()
model.pymc_trace_denoised_argmin()
model.pymc_trace_expected_improvement(denoised=True)
return model
def initialize(self):
"""
Initialize everything.
Computes the initial output data if they are not provided already.
"""
if self.Y_init is None:
if self.verbose:
print '\t> did not find observed objectives'
sys.stdout.write('\t> computing the objectives now... ')
sys.stdout.flush()
self.Y_init = [self.func(self.X_init[i, :], *self.args) for i in range(self.X_init.shape[0])]
if self.verbose:
sys.stdout.write('done!\n')
def optimize_step(self, it):
"""
Perform a single optimization step.
"""
# Train current model
self._ei = None
self._denoised_posterior_samples = None
self._best_idx = None
if self.mcmc_start_from_scratch:
self._model = None
else:
self.model.set_XY(self.X, self.Y[:, None])
if self.renew_design:
self.X_design = design.latin_center(*self.X_design.shape)
self.model._X_predict = self.X_design
if ((it == 0 and self.optimize_model_before_init_mcmc) or
self.optimize_model_before_mcmc):
self.model.optimize()
print str(self.model)
print self.model.kern.lengthscale
if self.verbose:
print '\t> starting mcmc sampling'
self.model.pymc_mcmc.sample(self.num_mcmc_samples,
burn=self.num_mcmc_burn,
thin=self.num_mcmc_thin,
tune_throughout=self.mcmc_tune_throughout,
progress_bar=self.mcmc_progress_bar)
# Find best expected improvement
ei = self.ei
i = np.argmax(ei)
# Do the simulation and add it
self.idx_X_obs.append(i)
self.Y_obs.append(self.func(self.X_design[i], *self.args))
if self.verbose:
print '\t> design point id to be added : {0:d}'.format(i)
print '\t> maximum expected improvement: {0:1.3f}'.format(ei[i])
return i, ei[i]
def optimize(self):
"""
Optimize the objective.
"""
if self.verbose:
print '> initializing algorithm'
self.ei_values = []
self.y_best_p500 = []
self.y_best_p025 = []
self.y_best_p975 = []
self.y_obs_best_p500 = []
self.y_obs_best_p025 = []
self.y_obs_best_p975 = []
self.x_best_p500 = []
self.x_best_p025 = []
self.x_best_p975 = []
for it in xrange(self.max_it):
i, ei_max = self.optimize_step(it)
self.ei_values.append(ei_max)
if self.make_plots:
self.plot(it)
if self.verbose:
print '> checking convergence'
if it >= self.add_at_least and ei_max / self.ei_values[0] < self.rel_tol:
if self.verbose:
print '*** Converged (ei[i_max] / eimax0 = {0:1.7f})'.format(
ei_max / self.ei_values[0])
break
else:
print '> rel. ei = {0:1.3f}'.format(ei_max / self.ei_values[0])
def plot(self, it):
"""
Plot the results of our analysis at the current step of the algorithm.
"""
if self.verbose:
print '> plotting intermediate results'
try:
self.plot_mcmc_diagnostics(it)
except:
pass
try:
self.plot_opt_status(it)
except:
pass
try:
self.plot_opt_dist(it)
except:
pass
try:
self.plot_opt_joint(it)
except:
pass
def _get_nd(self):
"""
Get the number of digits used for filenames.
"""
return len(str(self.max_it))
def _fig_name(self, name, it):
"""
Get the figure name.
"""
return self.plot_prefix + '_' + str(it).zfill(self._get_nd()) \
+ '_' + name + '.' + self.plot_ext
def _hyper_id(self, data, i):
return str(i + 1).zfill(len(str(data.shape[1])))
def _hyper_name(self, data, name, i):
return name + '_' + self._hyper_id(data, i)
def _hyper_fig_name(self, figname, data, name, i, it):
return self._fig_name(figname + '_' + self._hyper_name(data, name, i),
it)
def _hyper_tex(self, data, name, i):
return r'$\%s_{%s}$' % (name, self._hyper_id(data, i))
def plot_autocorrelations(self, data, name, it):
"""
Plot all autocorrelation plots.
"""
if self.verbose:
print '\t\t> plotting autocorrelations'
import matplotlib.pyplot as plt
for i in xrange(data.shape[1]):
fig, ax = self.new_fig_func()
ax.acorr(data[:, i], maxlags=data.shape[0] / 2)
figname = self._hyper_fig_name('acorr', data, name, i, it)
if self.verbose:
print '\t\t> writing:', figname
fig.savefig(figname)
plt.close(fig)
def plot_trace(self, data, name, it):
if self.verbose:
            print '\t\t> plotting trace of', name
import matplotlib.pyplot as plt
fig, ax = self.new_fig_func()
handles = ax.plot(data)
if data.shape[1] <= 5:
labels = [self._hyper_tex(data, name, i)
for i in xrange(data.shape[1])]
fig.legend(handles, labels)
figname = self._fig_name('trace_' + name, it)
if self.verbose:
print '\t\t> writing:', figname
fig.savefig(figname)
plt.close(fig)
def plot_dist(self, data, name, it, labels=None):
if self.verbose:
            print '\t\t> plotting distribution of', name
import matplotlib.pyplot as plt
import seaborn as sns
for i in xrange(data.shape[1]):
fig, ax = self.new_fig_func()
sns.distplot(data[:, i])
if labels is not None:
ax.set_xlabel(labels[i])
else:
ax.set_xlabel(self._hyper_tex(data, name, i))
figname = self._hyper_fig_name('dist', data, name, i, it)
if self.verbose:
print '\t\t> writing:', figname
fig.savefig(figname)
plt.close(fig)
def plot_opt_status(self, it):
self.plot_opt_status_gen(it)
if self.num_dim == 1:
self.plot_opt_status_1d(it)
elif self.num_dim == 2:
self.plot_opt_status_2d(it)
def plot_opt_status_1d(self, it):
if self.verbose:
print '\t\t> plotting the optimization status'
import matplotlib.pyplot as plt
import seaborn as sns
Y_d = self.denoised_posterior_samples
fig, ax1 = self.new_fig_func()
ax2 = ax1.twinx()
p_025 = np.percentile(Y_d, 2.5, axis=0)
p_500 = np.percentile(Y_d, 50, axis=0)
p_975 = np.percentile(Y_d, 97.5, axis=0)
ax1.fill_between(self.X_design.flatten(), p_025, p_975,
color=colorAlpha_to_rgb(sns.color_palette()[0], 0.25),
label='95\% error')
ax1.plot(self.X_design, p_500, color=sns.color_palette()[0],
label='Pred. mean')
ax1.plot(self.X[:-1, :], self.Y[:-1],
'kx', markersize=10, markeredgewidth=2,
label='Observations')
if self.true_func is not None:
ax1.plot(self.X_design, self.Y_true,
':', color=sns.color_palette()[2])
ax1.plot(self.X[-1, 0], self.Y[-1], 'o',
markersize=10, markeredgewidth=2,
color=sns.color_palette()[1])
ax2.plot(self.X_design, self.ei / self.ei_values[0],
'--', color=sns.color_palette()[3],
label='Exp. improvement')
ax2.set_ylim(0, 1.5)
plt.setp(ax2.get_yticklabels(), color=sns.color_palette()[3])
figname = self._fig_name('state', it)
if self.verbose:
print '\t\t> writing:', figname
fig.savefig(figname)
plt.close(fig)
def plot_opt_status_gen(self, it):
import matplotlib.pyplot as plt
import seaborn as sns
# Plot the expected improvement so far
if self.verbose:
print '\t\t> plotting the max expected improvement'
fig, ax = self.new_fig_func()
rel_ei = self.ei_values / self.ei_values[0]
ax.plot(np.arange(1, it + 2), rel_ei)
ax.set_xlabel('Iteration')
ax.set_ylabel('Maximum expected improvement')
figname = self._fig_name('ei', it)
if self.verbose:
print '\t\t> writing:', figname
fig.savefig(figname)
plt.close(fig)
# Plot the range of the best objectives we have found so far
if self.verbose:
print '\t\t> plotting stat. about optimal objective value'
y_best_p500 = np.median(self.Y_best)
y_best_p025 = np.percentile(self.Y_best, 2.5)
y_best_p975 = np.percentile(self.Y_best, 97.5)
self.y_best_p500.append(y_best_p500)
self.y_best_p025.append(y_best_p025)
self.y_best_p975.append(y_best_p975)
fig, ax = self.new_fig_func()
idx = np.arange(1, it + 2)
ax.fill_between(idx, self.y_best_p025,
self.y_best_p975,
color=colorAlpha_to_rgb(sns.color_palette()[0], 0.25))
ax.plot(idx, self.y_best_p500)
if self.true_func is not None:
ax.plot(idx, [self.Y_true_best] * idx.shape[0],
'--', color=sns.color_palette()[2])
ax.set_xlabel('Iteration')
ax.set_ylabel('Optimal objective')
figname = self._fig_name('objective', it)
if self.verbose:
print '\t\t> writing:', figname
fig.savefig(figname)
plt.close(fig)
# Do the same for the best observed values
y_obs_best = self.model.pymc_mcmc.trace('min_denoised_output')[:]
y_obs_best_p500 = np.percentile(y_obs_best, 50)
y_obs_best_p025 = np.percentile(y_obs_best, 2.5)
y_obs_best_p975 = np.percentile(y_obs_best, 97.5)
self.y_obs_best_p500.append(y_obs_best_p500)
self.y_obs_best_p025.append(y_obs_best_p025)
self.y_obs_best_p975.append(y_obs_best_p975)
fig, ax = self.new_fig_func()
idx = np.arange(1, it + 2)
ax.fill_between(idx, self.y_obs_best_p025,
self.y_obs_best_p975,
color=colorAlpha_to_rgb(sns.color_palette()[0], 0.25))
ax.plot(idx, self.y_obs_best_p500)
if self.true_func is not None:
ax.plot(idx, [self.Y_true_best] * idx.shape[0],
'--', color=sns.color_palette()[2])
ax.set_xlabel('Iteration')
ax.set_ylabel('Denoised observed optimal objective')
figname = self._fig_name('denoised_objective', it)
if self.verbose:
print '\t\t> writing:', figname
fig.savefig(figname)
plt.close(fig)
# What is the design point corresponding to the smallest of all
# the min_denoised?
X_best_file = self.plot_prefix + '_' + str(it).zfill(self._get_nd()) + '_X_best.npy'
if self.verbose:
print '\t\t> writing:', X_best_file
np.save(X_best_file, self.model.X)
# Do the same for the design (only 1D)
if self.num_dim == 1:
if self.verbose:
print '\t\t> plotting stat. about optimal design'
x_best_p500 = np.median(self.X_best.flatten())
x_best_p025 = np.percentile(self.X_best.flatten(), 2.5)
x_best_p975 = np.percentile(self.X_best.flatten(), 97.5)
self.x_best_p500.append(x_best_p500)
self.x_best_p025.append(x_best_p025)
self.x_best_p975.append(x_best_p975)
fig, ax = self.new_fig_func()
ax.fill_between(idx, self.x_best_p025,
self.x_best_p975,
color=colorAlpha_to_rgb(sns.color_palette()[0], 0.25))
ax.plot(idx, self.x_best_p500)
if self.true_func is not None:
ax.plot(idx, [self.X_true_best.flatten()] * idx.shape[0],
'--', color=sns.color_palette()[2])
ax.set_xlabel('Iteration')
ax.set_ylabel('Optimal design')
figname = self._fig_name('design', it)
fig.savefig(figname)
if self.verbose:
print '\t\t> writing:', figname
plt.close(fig)
def plot_opt_joint(self, it):
if self.num_dim == 1:
self.plot_opt_joint_1d(it)
def plot_opt_dist(self, it):
self.plot_dist(self.Y_best[:, None], 'objective', it,
labels=['Optimal objective'])
if self.num_dim == 1:
self.plot_dist(self.X_best, 'design', it,
labels=['Optimal design'])
@property
def denoised_posterior_samples(self):
"""
:getter: Get the denoised posterior samples from the GP.
"""
if self._denoised_posterior_samples is None:
Y = self.model.pymc_mcmc.trace('denoised_posterior_samples')[:]
Y = np.vstack(Y)
self._denoised_posterior_samples = Y
return self._denoised_posterior_samples
@property
def num_posterior_samples(self):
"""
:getter: The number of posterior samples of the GP.
"""
return self.denoised_posterior_samples.shape[0]
@property
def best_idx(self):
"""
:getter: The indices of the best design of each GP sample.
"""
if self._best_idx is None:
self._best_idx = np.argmin(self.denoised_posterior_samples, axis=1)
return self._best_idx
@property
def X_best(self):
"""
:getter: Get samples of the optimal designs.
"""
return self.X_design[self.best_idx, :]
@property
def Y_best(self):
"""
:getter: Get the values of the optimal designs.
"""
return np.array([self.denoised_posterior_samples[i, self.best_idx[i]]
for i in xrange(self.num_posterior_samples)])
def plot_opt_joint_1d(self, it):
import matplotlib.pyplot as plt
import seaborn as sns
g = sns.jointplot(self.X_best.flatten(), self.Y_best, kind='kde')
g.set_axis_labels('Optimal design', 'Optimal objective')
if self.true_func is not None:
g.ax_joint.plot(self.X_true_best[0], self.Y_true_best, 'x',
color=sns.color_palette()[2],
markersize=10, markeredgewidth=2)
plt.savefig(self._fig_name('opt', it))
def plot_mcmc_diagnostics(self, it):
"""
Plot diagnostics about the MCMC chain.
"""
if self.verbose:
print '\t> plotting mcmc diagnostics'
theta = self.model.pymc_mcmc.trace('transformed_hyperparameters')[:]
phi = self.model.pymc_mcmc.trace('hyperparameters')[:]
self.plot_autocorrelations(theta, 'theta', it)
self.plot_trace(theta, 'theta', it)
self.plot_trace(phi, 'phi', it)
self.plot_dist(theta, 'theta', it)
self.plot_dist(phi, 'phi', it)
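# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the library). It assumes
# design.latin_center(n_points, n_dim) returns an (n_points, n_dim) array and
# that the GPy model has been patched with the pymc_mcmc machinery this module
# relies on; the toy objective and all sizes are arbitrary choices.
if __name__ == '__main__':
    def quadratic(x):
        # Toy 1-D objective with its minimum near x = 0.3
        return float((x[0] - 0.3) ** 2)
    X_init = design.latin_center(5, 1)      # a few initial evaluations (assumed signature)
    X_design = design.latin_center(100, 1)  # candidate pool scanned by the EI
    opt = GlobalOptimizer(X_init, X_design, quadratic,
                          max_it=10, add_at_least=3,
                          num_mcmc_samples=200, num_mcmc_burn=50,
                          make_plots=False, mcmc_progress_bar=False)
    opt.optimize()
    print 'Best observed objective:', opt.Y.min()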
|
PredictiveScienceLab/py-bgo
|
pybgo/_global_optimizer.py
|
Python
|
mit
| 25,564
|
[
"Gaussian"
] |
188da916fb66f8d288fae0813fd6a8d1738812a637eb08accbcd5f719e3d3f28
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from .Options import Options
def get_options():
"""
Returns options for vtk fonts.
"""
opt = Options()
opt.add('text_color', [1, 1, 1], "The text color.")
opt.add('text_shadow', False, "Toggle text shadow.")
opt.add('justification', 'left', "Set the font justification.",
allow=['left', 'center', 'right'])
opt.add('vertical_justification', 'bottom', "The vertical text justification.",
allow=['bottom', 'middle', 'top'])
opt.add('text_opacity', 1, "The text opacity.", vtype=float)
opt.add('text', None, "The text to display.", vtype=str)
opt.add('font_size', 24, "The text font size.", vtype=int)
opt.add('font_family', None, "The font family.", vtype=str)
opt.add('bold', False, "Enable/disable text bolding.", vtype=bool)
opt.add('italic', False, "Enable/disable text italic.", vtype=bool)
return opt
def set_options(tprop, options):
"""
Applies font options to vtkTextProperty object.
Inputs:
tprop: A vtk.vtkTextProperty object for applying options.
options: The Options object containing the settings to apply.
"""
if options.isOptionValid('text_color'):
tprop.SetColor(options['text_color'])
if options.isOptionValid('text_shadow'):
tprop.SetShadow(options['text_shadow'])
if options.isOptionValid('justification'):
idx = options.raw('justification').allow.index(options['justification'])
tprop.SetJustification(idx)
if options.isOptionValid('vertical_justification'):
idx = options.raw('vertical_justification').allow.index(options['vertical_justification'])
tprop.SetVerticalJustification(idx)
if options.isOptionValid('text_opacity'):
tprop.SetOpacity(options['text_opacity'])
if options.isOptionValid('font_family'):
tprop.SetFontFamilyAsString(options['font_family'])
if options.isOptionValid('font_size'):
tprop.SetFontSize(options['font_size'])
if options.isOptionValid('bold'):
tprop.SetBold(options['bold'])
if options.isOptionValid('italic'):
tprop.SetItalic(options['italic'])
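# A minimal usage sketch (illustrative, not part of chigger): build the default
# options and apply them to a vtk.vtkTextProperty. Importing vtk here is an
# assumption of the example, not something FontOptions itself requires.
if __name__ == '__main__':
    import vtk
    tprop = vtk.vtkTextProperty()
    opt = get_options()
    set_options(tprop, opt)  # only options currently holding valid values are applied
    print('Applied font size: {}'.format(tprop.GetFontSize()))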
|
harterj/moose
|
python/chigger/utils/FontOptions.py
|
Python
|
lgpl-2.1
| 2,493
|
[
"MOOSE",
"VTK"
] |
d1faad37fd1a435c44194798a996a4e594b122d690ea6439f125bc1475ab0c6f
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
from scipy.special import erf
import matplotlib.pyplot as plt
def expected(x):
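    # Analytic solution for conduction into a half-space that starts at T_0 = 200 K
    # with its boundary held at T_inf = 300 K:
    #     T(x, t) = T_inf + (T_0 - T_inf) * erf(x / sqrt(4 * alpha * t))
    # where alpha = k / ((1 - phi) * rho_R * c_R) is the diffusivity used below
    # (only the rock matrix stores heat in this comparison).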
cond = 2.2
porosity = 0.1
rock_density = 0.5
rock_specific_heat_cap = 2.2
tinf = 300
tzero = 200
t = 1.0E2
alpha = cond / (1.0 - porosity) / rock_density / rock_specific_heat_cap
temp = tinf + (tzero - tinf) * erf(x / np.sqrt(4.0 * alpha * t))
return temp
def equal_to_tol(a, b):
return (np.abs(a) < 1.0 and np.abs(b) < 1.0) or ((a - b)/(a + b) < 1.0E-5)
def check_data(fn, correct_answer):
try:
f = open(fn, 'r')
        data = list(map(float, f.readlines()[-2].strip().split(",")))
f.close()
for i in range(len(correct_answer)):
if not equal_to_tol(data[i], correct_answer[i]):
                sys.stderr.write(fn + " is not giving the correct answer\n")
except:
sys.stderr.write("Cannot read " + fn + ", or it contains erroneous data\n")
def no_fluid(fn):
    correct_answer = list(map(float, "100,300,262.05620419362,233.16444061125,215.5562816469,206.53657081398,202.5048010279,200.88831039136,200.29514030239,200.09319946286,200.02989431659,200.0158846078".split(",")))
check_data(fn, correct_answer)
return [correct_answer[1 + i] for i in range(11)]
def twoph(fn):
    correct_answer = list(map(float, "100,300,262.05620419362,233.16444061125,215.5562816469,206.53657081398,202.5048010279,200.88831039136,200.29514030239,200.09319946286,200.02989431659,200.0158846078".split(",")))
check_data(fn, correct_answer)
return [correct_answer[1 + i] for i in range(11)]
xpoints = np.arange(0, 101, 1)
moosex = range(0, 110, 10)
zero_phase = no_fluid("../../../../../../test/tests/heat_conduction/gold/no_fluid.csv")
two_phase = twoph( "../../../../../../test/tests/heat_conduction/gold/two_phase.csv")
plt.figure()
plt.plot(xpoints, expected(xpoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(moosex, zero_phase, 'rs', markersize = 10.0, label = 'MOOSE (no fluid)')
plt.plot(moosex, two_phase, 'b^', label = 'MOOSE (2 phase)')
plt.legend(loc = 'upper right')
plt.xlabel("x (m)")
plt.ylabel("Temperature (K)")
plt.title("Heat conduction in 1D")
plt.axis([0, 100, 199, 301])
plt.savefig("heat_conduction_1d.png")
sys.exit(0)
|
nuclear-wizard/moose
|
modules/porous_flow/doc/content/modules/porous_flow/tests/heat_conduction/heat_conduction.py
|
Python
|
lgpl-2.1
| 2,591
|
[
"MOOSE"
] |
ed315e22e412a62f259f331cc423198819ef17f42271382223e06577290844b8
|
#!/usr/bin/env python
from __future__ import print_function
#
# LSST Data Management System
# Copyright 2012, 2015 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
import os
import warnings
import unittest
import lsst.afw.image as afwImage
import lsst.utils.tests as utilsTests
from lsst.utils import getPackageDir
import lsst.pex.exceptions as pexExcept
import lsst.daf.persistence as dafPersist
class GetRawTestCase(unittest.TestCase):
"""Testing butler raw image retrieval"""
def setUp(self):
try:
datadir = getPackageDir("testdata_decam")
except pexExcept.NotFoundError:
message = "testdata_decam not setup. Skipping."
warnings.warn(message)
raise unittest.SkipTest(message)
self.repoPath = os.path.join(datadir, "rawData")
self.butler = dafPersist.Butler(root=self.repoPath)
self.size = (2160, 4146)
self.dataId = {'visit': 229388, 'ccdnum': 1}
self.filter = "z"
self.exptime = 200.0
def tearDown(self):
del self.butler
def testPackageName(self):
name = dafPersist.Butler.getMapperClass(root=self.repoPath).packageName
self.assertEqual(name, "obs_decam")
def testRaw(self):
"""Test retrieval of raw image"""
exp = self.butler.get("raw", self.dataId)
print("dataId: %s" % self.dataId)
print("width: %s" % exp.getWidth())
print("height: %s" % exp.getHeight())
print("detector id: %s" % exp.getDetector().getId())
self.assertEqual(exp.getWidth(), self.size[0])
self.assertEqual(exp.getHeight(), self.size[1])
self.assertEqual(exp.getDetector().getId(), self.dataId["ccdnum"])
self.assertEqual(exp.getFilter().getFilterProperty().getName(), self.filter)
self.assertTrue(exp.hasWcs())
# Metadata which should have been copied from zeroth extension.
self.assertIn("MJD-OBS", exp.getMetadata().paramNames())
self.assertEqual(exp.getCalib().getExptime(), self.exptime)
# Example of metadata which should *not* have been copied from zeroth extension.
self.assertNotIn("PROPOSER", exp.getMetadata().paramNames())
def testRawMetadata(self):
"""Test retrieval of metadata"""
md = self.butler.get("raw_md", self.dataId)
print("EXPNUM(visit): %s" % md.get('EXPNUM'))
print("ccdnum: %s" % md.get('CCDNUM'))
self.assertEqual(md.get('EXPNUM'), self.dataId["visit"])
self.assertEqual(md.get('CCDNUM'), self.dataId["ccdnum"])
def testBias(self):
"""Test retrieval of bias image"""
exp = self.butler.get("bias", self.dataId)
print("dataId: %s" % self.dataId)
print("detector id: %s" % exp.getDetector().getId())
self.assertEqual(exp.getDetector().getId(), self.dataId["ccdnum"])
self.assertTrue(exp.hasWcs())
def testFlat(self):
"""Test retrieval of flat image"""
exp = self.butler.get("flat", self.dataId)
print("dataId: %s" % self.dataId)
print("detector id: %s" % exp.getDetector().getId())
print("filter: %s" % self.filter)
self.assertEqual(exp.getDetector().getId(), self.dataId["ccdnum"])
self.assertEqual(exp.getFilter().getFilterProperty().getName(), self.filter)
self.assertTrue(exp.hasWcs())
def testFringe(self):
"""Test retrieval of fringe image"""
exp = self.butler.get("fringe", self.dataId)
print("dataId: %s" % self.dataId)
print("detector id: %s" % exp.getDetector().getId())
print("filter: %s" % self.filter)
self.assertEqual(exp.getDetector().getId(), self.dataId["ccdnum"])
self.assertEqual(exp.getFilter().getFilterProperty().getName(), self.filter)
def testDefect(self):
"""Test retrieval of defect list"""
defectList = self.butler.get("defects", self.dataId)
self.assertEqual(len(defectList), 9)
for d in defectList:
self.assertIsInstance(d, afwImage.DefectBase)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def suite():
"""Returns a suite containing all the test cases in this module."""
utilsTests.init()
suites = []
suites += unittest.makeSuite(GetRawTestCase)
suites += unittest.makeSuite(utilsTests.MemoryTestCase)
return unittest.TestSuite(suites)
def run(shouldExit = False):
"""Run the tests"""
utilsTests.run(suite(), shouldExit)
if __name__ == "__main__":
run(True)
|
yalsayyad/obs_decam
|
tests/getRaw.py
|
Python
|
gpl-3.0
| 5,285
|
[
"VisIt"
] |
20081e8f958ee2b392db0cab1b60493a45ae6cde94eb1140605d088cab6ba6a4
|
#!/usr/bin/env python
from astropy.io import fits
from astropy.time import Time
import glob
import os
import photutils
import scipy.optimize as sco
import numpy as np
# Gaussian functional form assumed for PSF fits
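# The model below is I(x, y) = s0 + s1*exp(-w*((x - x0)**2 + (y - y0)**2)), so
# FWHM = 2*sqrt(ln(2)/|w|) and the background-subtracted flux integrates to
# pi*s1/|w| (see psf_flux); the exponent is floored at -50 before exponentiation.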
def psf((xx,yy),s0,s1,x0,y0,w):
elow = -50.
arg = - w * ( (xx-x0)**2 + (yy-y0)**2 )
arg[arg <= elow] = elow
intensity = s0 + s1 * np.exp( arg )
fwhm = 2. * np.sqrt(np.log(2.)/np.abs(w))
# Turn 2D intensity array into 1D array
return intensity.ravel()
# Total integrated flux and fwhm for the assumed Gaussian PSF
def psf_flux(s0,s1,x0,y0,w):
flux = np.pi * s1/np.abs(w)
fwhm = 2. * np.sqrt(np.log(2.)/np.abs(w))
return fwhm, flux
def center(xx):
global imdata
# The aperture size for finding the location of the stars is arbitrarily set here
app = 7.
flux = -photutils.aperture_circular(imdata, xx[0], xx[1], app, method='exact',subpixels=10)
return flux
# The derivative of the Gaussian PSF with respect to x and y in pixels. eps = 0.2 pixels
def der_center(xx):
eps = 0.2
# The aperture size for finding the location of the stars is arbitrarily set here
app = 7.
der = np.zeros_like(xx)
fxp1 = center([ xx[0] + eps, xx[1] ])
fxm1 = center([ xx[0] - eps, xx[1] ])
fyp1 = center([ xx[0], xx[1] + eps ])
fym1 = center([ xx[0], xx[1] - eps ])
der[0] = (fxp1-fxm1)/(2.*eps)
der[1] = (fyp1-fym1)/(2.*eps)
return der
# The main function for doing aperture photometry on individual FITS files
# app = -1 means the results are for a PSF fit instead of aperture photometry
def aperture(image,hdr):
global iap, pguess_old, nstars, svec
dnorm = 500.
rann1 = 18.
dann = 2.
rann2 = rann1 + dann
app_min = 1.
app_max = 19.
dapp = 1.
app_sizes = np.arange(app_min,app_max,dapp)
# If first time through, read in "guesses" for locations of stars
if iap == 0:
var = np.loadtxt('phot_coords')
xvec = var[:,0]
yvec = var[:,1]
nstars = len(xvec)
#print app_sizes,'\n'
else:
xvec = svec[:,0]
yvec = svec[:,1]
# Find locations of stars
dxx0 = 10.
for i in range(nstars):
xx0 = [xvec[i], yvec[i]]
xbounds = (xx0[0]-dxx0,xx0[0]+dxx0)
ybounds = (xx0[1]-dxx0,xx0[1]+dxx0)
#res = sco.minimize(center, xx0, method='BFGS', jac=der_center)
#res = sco.fmin_tnc(center, xx0, bounds=(xbounds,ybounds))
#res = sco.minimize(center, xx0, method='tnc', bounds=(xbounds,ybounds))
res = sco.minimize(center, xx0, method='L-BFGS-B', bounds=(xbounds,ybounds),jac=der_center)
xx0=res.x
xvec[i] = xx0[0]
yvec[i] = xx0[1]
# Calculate sky around stars
sky = photutils.annulus_circular(image, xvec, yvec, rann1, rann2, method='exact',subpixels=10)
    # PSF fits to the stars are currently disabled; fwhm is kept only as a placeholder array
fwhm = np.zeros(nstars)
# Make stacked array of star positions from aperture photometry
svec = np.dstack((xvec,yvec))[0]
#print svec
# Make stacked array of star positions from PSF fitting
# pvec = np.dstack((psf_x,psf_y))[0]
pvec = svec
iap = iap + 1
starr = []
apvec = []
app=-1.0
# Get time of observation from the header
#date = hdr['DATE-OBS'] # for Argos files
#utc = hdr['UTC'] # for Argos files
date = hdr['UTC-DATE'] # for Pro-EM files
utc = hdr['UTC-BEG'] # for Pro-EM files
times = date + " " + utc
t = Time(times, format='iso', scale='utc')
# Calculate Julian Date of observation
jd = t.jd
for app in app_sizes:
flux = photutils.aperture_circular(image, xvec, yvec, app, method='exact',subpixels=10)
skyc = sky*app**2/(rann2**2 - rann1**2)
fluxc = flux - skyc
starr.append([fluxc,skyc,fwhm])
apvec.append(app)
starr = np.array(starr)
apvec = np.array(apvec)
#print starr
return jd,svec,pvec,apvec,starr
def combine(pattern,fileout):
files = glob.glob(pattern)
print 'files= ',files
n=0
print " Combining frames: "
for file in files:
pa,fi = os.path.split(file)
print fi,
if ( (n+1)/4 )*4 == n+1:
print " "
list = fits.open(file)
imdata = list[0].data
hdr = list[0].header
if n==0:
exptime0 = hdr['exptime']
exptime = hdr['exptime']
if exptime != exptime0:
print "\nError: Exposure time mismatch in",file,"\n"
exit()
if file == files[0]:
comb = imdata
else:
comb = comb + imdata
list.close()
n=n+1
print ""
comb = comb/float(n)
out = fits.open(files[0])
hdr = out[0].header
#hdr.rename_keyword('NTP:GPS','NTP-GPS') # Argos
combdata = out[0].data
combdata = comb
out.writeto(fileout)
out.close()
return exptime, files, comb
def head_write(ffile,object,nstars):
dform0='# Aperture reductions for target {0}. Total number of stars is {1}\n'.format(object,nstars)
ffile.write(dform0)
# Format header for the general case of nstars stars
eform0='# time (JD) App (pix) Target Counts'
    # (legacy fixed-width header, previously a bare string literal with no effect;
    #  the per-star columns are appended to eform0 dynamically below)
    # 'Comparison Counts    Sky Counts     Target Position    Comp Position  FWHM   Fits File\n'
for i in range(1,nstars):
eform0 = eform0 + ' Comp {0} Counts'.format(i)
eform0 = eform0 + ' Target Sky'
for i in range(1,nstars):
eform0 = eform0 + ' Comp {0} Sky'.format(i)
eform0 = eform0 + ' Target Position'
for i in range(1,nstars):
eform0 = eform0 + ' Comp {0} Position'.format(i)
eform0 = eform0 + ' Target FWHM'
for i in range(1,nstars):
eform0 = eform0 + ' Comp {0} FWHM'.format(i)
eform0 = eform0 + ' FITS File\n'
ffile.write(eform0)
def app_write(efout,ndim,nstars,jd,apvec,svec,pvec,var2):
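    # NOTE: 'file' referenced below is not an argument; it is the module-level loop
    # variable bound in the __main__ block, so this routine is only valid when
    # called from inside that loop.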
for i in range(0,ndim):
if apvec[i] >= 0.0:
eform = '{0:18.8f} {1:7.2f} '.format(jd,apvec[i])
for j in range(0,nstars):
eform = eform + '{0:17.8f} '.format(var2[i,0,j])
for j in range(0,nstars):
eform = eform + '{0:17.8f} '.format(var2[i,1,j])
for j in range(0,nstars):
eform = eform + '{0:8.2f} {1:7.2f} '.format(svec[j,0],svec[j,1])
for j in range(0,nstars):
eform = eform + '{0:8.3f} '.format(var2[i,2,j])
eform = eform + file + '\n'
else:
eform = '{0:18.8f} {1:7.2f} '.format(jd,apvec[i])
for j in range(0,nstars):
eform = eform + '{0:17.8f} '.format(var2[i,0,j])
for j in range(0,nstars):
eform = eform + '{0:17.8f} '.format(var2[i,1,j])
for j in range(0,nstars):
eform = eform + '{0:8.2f} {1:7.2f} '.format(pvec[j,0],pvec[j,1])
for j in range(0,nstars):
eform = eform + '{0:8.3f} '.format(var2[i,2,j])
eform = eform + file + '\n'
efout.write(eform)
if __name__ == '__main__':
global imdata, iap, nstars
# Get list of all FITS images for run
run_pattern = 'GD244-????.fits'
#fits_files = glob.glob('A????.????.fits')
fits_files = glob.glob(run_pattern)
# This is the first image
fimage = fits_files[0]
#print "Dark correcting and flat-fielding files...\n"
list = fits.open(fimage)
hdr = list[0].header
object= hdr['object']
#run= hdr['run']
efout=open('lightcurve.app','w')
#print 'Calculating apertures:'
iap = 0
icount = 1
fcount = ''
print 'Processing files:'
for file in fits_files:
fcount = fcount + ' ' + file
if np.remainder(icount,5) == 0:
print fcount
fcount = ''
else:
if file == fits_files[-1]:
print fcount
icount = icount + 1
# open FITS file
list = fits.open(file)
imdata = list[0].data
hdr = list[0].header
# Call aperture photometry routine. Get times, positions, and fluxes
# var2 contains the list [fluxc,skyc,fwhm])
# jd,svec,pvec,apvec,starr
# fluxc, skyc, and fwhm are all lists of length nstars
# jd, svec, pvec, apvec, var2 = aperture(imdata,hdr,dnorm)
jd, svec, pvec, apvec, var2 = aperture(imdata,hdr)
ndim = len(apvec)
# First time through write header
if icount == 2:
head_write(efout,object,nstars)
# Write out results for all apertures
app_write(efout,ndim,nstars,jd,apvec,svec,pvec,var2)
|
keatonb/tsphot
|
fits_process.py
|
Python
|
mit
| 8,982
|
[
"Gaussian"
] |
c558e1d6c82bc4fb61c67def7c01ac971f2c5941b3432fe42e784fc31a0bee32
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC compatible with TensorFlow's eager execution.
Reference [Generalizing Hamiltonian Monte Carlo with Neural
Networks](https://arxiv.org/pdf/1711.09268.pdf)
Code adapted from the released TensorFlow graph implementation by original
authors https://github.com/brain-research/l2hmc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import neural_nets
class Dynamics(tf.keras.Model):
"""Dynamics engine of naive L2HMC sampler."""
def __init__(self,
x_dim,
minus_loglikelihood_fn,
n_steps=25,
eps=.1,
np_seed=1):
"""Initialization.
Args:
x_dim: dimensionality of observed data
minus_loglikelihood_fn: log-likelihood function of conditional probability
n_steps: number of leapfrog steps within each transition
      eps: initial value of the learnable step size
np_seed: Random seed for numpy; used to control sampled masks.
"""
super(Dynamics, self).__init__()
npr.seed(np_seed)
self.x_dim = x_dim
self.potential = minus_loglikelihood_fn
self.n_steps = n_steps
self._construct_time()
self._construct_masks()
self.position_fn = neural_nets.GenericNet(x_dim, factor=2.)
self.momentum_fn = neural_nets.GenericNet(x_dim, factor=1.)
self.eps = tf.Variable(
initial_value=eps, name="eps", dtype=tf.float32, trainable=True)
def apply_transition(self, position):
"""Propose a new state and perform the accept or reject step."""
# Simulate dynamics both forward and backward;
# Use sampled Bernoulli masks to compute the actual solutions
position_f, momentum_f, accept_prob_f = self.transition_kernel(
position, forward=True)
position_b, momentum_b, accept_prob_b = self.transition_kernel(
position, forward=False)
# Decide direction uniformly
batch_size = tf.shape(position)[0]
forward_mask = tf.cast(tf.random_uniform((batch_size,)) > .5, tf.float32)
backward_mask = 1. - forward_mask
# Obtain proposed states
position_post = (
forward_mask[:, None] * position_f +
backward_mask[:, None] * position_b)
momentum_post = (
forward_mask[:, None] * momentum_f +
backward_mask[:, None] * momentum_b)
# Probability of accepting the proposed states
accept_prob = forward_mask * accept_prob_f + backward_mask * accept_prob_b
# Accept or reject step
accept_mask = tf.cast(
accept_prob > tf.random_uniform(tf.shape(accept_prob)), tf.float32)
reject_mask = 1. - accept_mask
# Samples after accept/reject step
position_out = (
accept_mask[:, None] * position_post + reject_mask[:, None] * position)
return position_post, momentum_post, accept_prob, position_out
def transition_kernel(self, position, forward=True):
"""Transition kernel of augmented leapfrog integrator."""
lf_fn = self._forward_lf if forward else self._backward_lf
# Resample momentum
momentum = tf.random_normal(tf.shape(position))
position_post, momentum_post = position, momentum
sumlogdet = 0.
# Apply augmented leapfrog steps
for i in range(self.n_steps):
position_post, momentum_post, logdet = lf_fn(position_post, momentum_post,
i)
sumlogdet += logdet
accept_prob = self._compute_accept_prob(position, momentum, position_post,
momentum_post, sumlogdet)
return position_post, momentum_post, accept_prob
def _forward_lf(self, position, momentum, i):
"""One forward augmented leapfrog step. See eq (5-6) in paper."""
t = self._get_time(i)
mask, mask_inv = self._get_mask(i)
sumlogdet = 0.
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _backward_lf(self, position, momentum, i):
"""One backward augmented leapfrog step. See Appendix A in paper."""
# Reversed index/sinusoidal time
t = self._get_time(self.n_steps - i - 1)
mask, mask_inv = self._get_mask(self.n_steps - i - 1)
sumlogdet = 0.
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _update_momentum_forward(self, position, momentum, t):
"""Update v in the forward leapfrog step."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= .5 * self.eps
transformed *= self.eps
momentum = (
momentum * tf.exp(scale) -
.5 * self.eps * (tf.exp(transformed) * grad - translation))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_forward(self, position, momentum, t, mask, mask_inv):
"""Update x in the forward leapfrog step."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= self.eps
transformed *= self.eps
position = (
mask * position +
mask_inv * (position * tf.exp(scale) + self.eps *
(tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _update_momentum_backward(self, position, momentum, t):
"""Update v in the backward leapfrog step. Inverting the forward update."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= -.5 * self.eps
transformed *= self.eps
momentum = (
tf.exp(scale) * (momentum + .5 * self.eps *
(tf.exp(transformed) * grad - translation)))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_backward(self, position, momentum, t, mask, mask_inv):
"""Update x in the backward leapfrog step. Inverting the forward update."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= -self.eps
transformed *= self.eps
position = (
mask * position + mask_inv * tf.exp(scale) *
(position - self.eps * (tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _compute_accept_prob(self, position, momentum, position_post,
momentum_post, sumlogdet):
"""Compute the prob of accepting the proposed state given old state."""
old_hamil = self.hamiltonian(position, momentum)
new_hamil = self.hamiltonian(position_post, momentum_post)
prob = tf.exp(tf.minimum(old_hamil - new_hamil + sumlogdet, 0.))
# Ensure numerical stability as well as correct gradients
return tf.where(tf.is_finite(prob), prob, tf.zeros_like(prob))
def _construct_time(self):
"""Convert leapfrog step index into sinusoidal time."""
self.ts = []
for i in range(self.n_steps):
t = tf.constant(
[
np.cos(2 * np.pi * i / self.n_steps),
np.sin(2 * np.pi * i / self.n_steps)
],
dtype=tf.float32)
self.ts.append(t[None, :])
def _get_time(self, i):
"""Get sinusoidal time for i-th augmented leapfrog step."""
return self.ts[i]
def _construct_masks(self):
"""Construct different binary masks for different time steps."""
self.masks = []
for _ in range(self.n_steps):
      # Need to use npr here because tf would generate different random
# values across different `sess.run`
idx = npr.permutation(np.arange(self.x_dim))[:self.x_dim // 2]
mask = np.zeros((self.x_dim,))
mask[idx] = 1.
mask = tf.constant(mask, dtype=tf.float32)
self.masks.append(mask[None, :])
def _get_mask(self, i):
"""Get binary masks for i-th augmented leapfrog step."""
m = self.masks[i]
return m, 1. - m
def kinetic(self, v):
"""Compute the kinetic energy."""
return .5 * tf.reduce_sum(v**2, axis=1)
def hamiltonian(self, position, momentum):
"""Compute the overall Hamiltonian."""
return self.potential(position) + self.kinetic(momentum)
def grad_potential(self, position, check_numerics=True):
"""Get gradient of potential function at current location."""
if tf.executing_eagerly():
grad = tfe.gradients_function(self.potential)(position)[0]
else:
grad = tf.gradients(self.potential(position), position)[0]
return grad
# Examples of unnormalized log densities
def get_scg_energy_fn():
"""Get energy function for 2d strongly correlated Gaussian."""
# Avoid recreating tf constants on each invocation of gradients
mu = tf.constant([0., 0.])
sigma = tf.constant([[50.05, -49.95], [-49.95, 50.05]])
sigma_inv = tf.matrix_inverse(sigma)
def energy(x):
"""Unnormalized minus log density of 2d strongly correlated Gaussian."""
xmmu = x - mu
return .5 * tf.diag_part(
tf.matmul(tf.matmul(xmmu, sigma_inv), tf.transpose(xmmu)))
return energy, mu, sigma
def get_rw_energy_fn():
"""Get energy function for rough well distribution."""
# For small eta, the density underlying the rough-well energy is very close to
# a unit Gaussian; however, the gradient is greatly affected by the small
# cosine perturbations
eta = 1e-2
mu = tf.constant([0., 0.])
sigma = tf.constant([[1., 0.], [0., 1.]])
def energy(x):
ip = tf.reduce_sum(x**2., axis=1)
return .5 * ip + eta * tf.reduce_sum(tf.cos(x / eta), axis=1)
return energy, mu, sigma
# Loss function
def compute_loss(dynamics, x, scale=.1, eps=1e-4):
"""Compute loss defined in equation (8)."""
z = tf.random_normal(tf.shape(x)) # Auxiliary variable
x_, _, x_accept_prob, x_out = dynamics.apply_transition(x)
z_, _, z_accept_prob, _ = dynamics.apply_transition(z)
# Add eps for numerical stability; following released impl
x_loss = tf.reduce_sum((x - x_)**2, axis=1) * x_accept_prob + eps
z_loss = tf.reduce_sum((z - z_)**2, axis=1) * z_accept_prob + eps
loss = tf.reduce_mean(
(1. / x_loss + 1. / z_loss) * scale - (x_loss + z_loss) / scale, axis=0)
return loss, x_out, x_accept_prob
def loss_and_grads(dynamics, x, loss_fn=compute_loss):
"""Obtain loss value and gradients."""
with tf.GradientTape() as tape:
loss_val, out, accept_prob = loss_fn(dynamics, x)
grads = tape.gradient(loss_val, dynamics.trainable_variables)
return loss_val, grads, out, accept_prob
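# Minimal usage sketch (illustrative only): draw proposals for the strongly
# correlated Gaussian defined above and take one optimizer step. It assumes a
# TF 1.x build with eager execution available; the batch size, learning rate
# and leapfrog settings are arbitrary choices for the example.
if __name__ == "__main__":
  tf.enable_eager_execution()
  energy_fn, _, _ = get_scg_energy_fn()
  dynamics = Dynamics(x_dim=2, minus_loglikelihood_fn=energy_fn, n_steps=10, eps=.1)
  optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
  samples = tf.random_normal(shape=[64, 2])
  loss, grads, samples, accept_prob = loss_and_grads(dynamics, samples)
  optimizer.apply_gradients(zip(grads, dynamics.trainable_variables))
  print("loss: %f, mean acceptance: %f" %
        (loss.numpy(), accept_prob.numpy().mean()))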
|
dongjoon-hyun/tensorflow
|
tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py
|
Python
|
apache-2.0
| 12,269
|
[
"Gaussian"
] |
9a82940a89c0e4c22a1eb784906bbcf7a4cbc1f5f62ef54b2b414109e33a6e2a
|
# __BROKEN__
# Izhikevich_with_synapse.py ---
#
# Filename: Izhikevich_with_synapse.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Sat Apr 19 10:47:15 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
import pylab
sys.path.append('../../python')
import moose
from moose import utils as mutils
def make_neuron(path):
"""Create a neuron with parameters set for tonic_bursting."""
nrn = moose.IzhikevichNrn(path)
# "tonic_bursting": ['C', 0.02 , 0.2 , -50.0, 2.0 , 15.0, -70.0, 220.0], # Fig. 1.C
#print((path,dir(nrn)))
nrn.alpha = 0.04
nrn.beta = 5.0
nrn.gamma = 140.0
#nrn.Rm = 1.0 # FIXME: IzhikevichNrn does not have this field.
nrn.a = 0.02
nrn.b = 0.2
nrn.c = -50.0
nrn.d = 2.0
nrn.initVm = -70.0
nrn.Vmax = 40.0
return nrn
def make_pulsegen(path):
"""Create a pulse generator to test tonic spiking in the neuron."""
pulse = moose.PulseGen(path)
pulse.delay[0] = 10.0 # ms
pulse.width[0] = 200.0 # ms
pulse.level[0] = 15e-3 # uA
return pulse
def make_synapse(path):
"""Create a synapse with two time constants."""
syn = moose.SynChan(path)
syn.tau1 = 5.0 # ms
syn.tau2 = 1.0 # ms
syn.Gbar = 1.0 # mS
syn.Ek = 0.0
synsh = moose.SimpleSynHandler( path + '/sh' )
synsh.synapse.num = 1
# syn.bufferTime = 1.0 # ms
synsh.synapse.delay = 1.0
synsh.synapse.weight = 1.0
print(('Synapses:', len(synsh.synapse), 'w=', synsh.synapse[0].weight))
spikegen = moose.SpikeGen('%s/spike' % (syn.parent.path))
spikegen.edgeTriggered = False # Make it fire continuously when input is high
spikegen.refractT = 10.0 # With this setting it will fire at 1 s / 10 ms = 100 Hz
spikegen.threshold = 0.5
    # This will send alternating -1 and +1 to SpikeGen to make it fire
spike_stim = moose.PulseGen('%s/spike_stim' % (syn.parent.path))
spike_stim.delay[0] = 50.0
spike_stim.level[0] = 1.0
spike_stim.width[0] = 100.0
moose.connect(spike_stim, 'output', spikegen, 'Vm')
m = moose.connect(spikegen, 'spikeOut', synsh.synapse[0], 'addSpike')
return syn, spikegen
def make_model():
model = moose.Neutral('/model') # Just a container for other things
neuron = make_neuron('/model/neuron')
pulse = make_pulsegen('/model/pulse')
synapse, spike_in = make_synapse('/model/synapse')
# moose.connect(pulse, 'output', neuron, 'injectDest')
moose.connect(neuron, 'channel', synapse, 'channel')
return {'neuron': neuron,
'pulse': pulse,
'synapse': synapse,
'spike_in': spike_in}
def setup_data_recording(neuron, pulse, synapse, spikegen):
data = moose.Neutral('/data')
vm_table = moose.Table('/data/Vm')
moose.connect(vm_table, 'requestOut', neuron, 'getVm')
inject_table = moose.Table('/data/Inject')
moose.connect(inject_table, 'requestOut', pulse, 'getOutputValue')
gk_table = moose.Table('/data/Gk')
moose.connect(gk_table, 'requestOut', synapse, 'getGk')
spike_in_table = moose.Table('/data/spike_in')
moose.connect(spikegen, 'spikeOut', spike_in_table, 'spike')
return [vm_table, inject_table, gk_table, spike_in_table]
def main():
"""
    This shows the use of SynChan with an Izhikevich neuron. The same
    pattern can be used to create a network of Izhikevich neurons.
"""
simtime = 200.0
stepsize = 10.0
model_dict = make_model()
vm, inject, gk, spike = setup_data_recording(model_dict['neuron'],
model_dict['pulse'],
model_dict['synapse'],
model_dict['spike_in'])
mutils.setDefaultDt(elecdt=0.01, plotdt2=0.25)
mutils.assignDefaultTicks(solver='ee')
moose.reinit()
mutils.stepRun(simtime, stepsize)
pylab.subplot(411)
pylab.plot(pylab.linspace(0, simtime, len(vm.vector)), vm.vector, label='Vm (mV)')
pylab.legend()
pylab.subplot(412)
pylab.plot(pylab.linspace(0, simtime, len(inject.vector)), inject.vector, label='Inject (uA)')
pylab.legend()
pylab.subplot(413)
pylab.plot(spike.vector, pylab.ones(len(spike.vector)), '|', label='input spike times')
pylab.legend()
pylab.subplot(414)
pylab.plot(pylab.linspace(0, simtime, len(gk.vector)), gk.vector, label='Gk (mS)')
pylab.legend()
pylab.show()
if __name__ == '__main__':
main()
#
# Izhikevich_with_synapse.py ends here
|
BhallaLab/moose-examples
|
snippets/Izhikevich_with_synapse.py
|
Python
|
gpl-2.0
| 5,356
|
[
"MOOSE",
"NEURON"
] |
de2563a815600bc14c12188b85c1020727e9900495e9c18621f4cabc40c3eec2
|
""" Test for Profiler.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from os.path import dirname, join
from subprocess import Popen
import pytest
from flaky import flaky
import DIRAC
from DIRAC.Core.Utilities.Profiler import Profiler
# Mark this entire module as slow
pytestmark = pytest.mark.slow
def test_base():
p = Profiler()
res = p.pid()
assert res["OK"] is False
res = p.status()
assert res["OK"] is False
mainProcess = Popen(
[
"python",
join(dirname(DIRAC.__file__), "tests/Utilities/ProcessesCreator_withChildren.py"),
]
)
time.sleep(1)
p = Profiler(mainProcess.pid)
res = p.pid()
assert res["OK"] is True
res = p.status()
assert res["OK"] is True
res = p.runningTime()
assert res["OK"] is True
assert res["Value"] > 0
res = p.memoryUsage()
assert res["OK"] is True
assert res["Value"] > 0
resWC = p.memoryUsage(withChildren=True)
assert resWC["OK"] is True
assert resWC["Value"] > 0
assert resWC["Value"] >= res["Value"]
res = p.vSizeUsage()
assert res["OK"] is True
assert res["Value"] > 0
resWC = p.vSizeUsage(withChildren=True)
assert resWC["OK"] is True
assert resWC["Value"] > 0
assert resWC["Value"] >= res["Value"]
res = p.vSizeUsage()
assert res["OK"] is True
assert res["Value"] > 0
resWC = p.vSizeUsage(withChildren=True)
assert resWC["OK"] is True
assert resWC["Value"] > 0
assert resWC["Value"] >= res["Value"]
res = p.numThreads()
assert res["OK"] is True
assert res["Value"] > 0
resWC = p.numThreads(withChildren=True)
assert resWC["OK"] is True
assert resWC["Value"] > 0
assert resWC["Value"] >= res["Value"]
res = p.cpuPercentage()
assert res["OK"] is True
assert res["Value"] >= 0
resWC = p.cpuPercentage(withChildren=True)
assert resWC["OK"] is True
assert resWC["Value"] >= 0
assert resWC["Value"] >= res["Value"]
@flaky(max_runs=10, min_passes=2)
def test_cpuUsage():
mainProcess = Popen(
[
"python",
join(dirname(DIRAC.__file__), "tests/Utilities/ProcessesCreator_withChildren.py"),
]
)
time.sleep(2)
p = Profiler(mainProcess.pid)
res = p.pid()
assert res["OK"] is True
res = p.status()
assert res["OK"] is True
# user
res = p.cpuUsageUser()
assert res["OK"] is True
assert res["Value"] > 0
resC = p.cpuUsageUser(withChildren=True)
assert resC["OK"] is True
assert resC["Value"] > 0
assert resC["Value"] >= res["Value"]
res = p.cpuUsageUser()
assert res["OK"] is True
assert res["Value"] > 0
resC = p.cpuUsageUser(withChildren=True)
assert resC["OK"] is True
assert resC["Value"] > 0
assert resC["Value"] >= res["Value"]
resT = p.cpuUsageUser(withTerminatedChildren=True)
assert resT["OK"] is True
assert resT["Value"] > 0
assert resT["Value"] >= res["Value"]
resTC = p.cpuUsageUser(withChildren=True, withTerminatedChildren=True)
assert resTC["OK"] is True
assert resTC["Value"] > 0
assert resTC["Value"] >= res["Value"]
# system
res = p.cpuUsageSystem()
assert res["OK"] is True
assert res["Value"] >= 0
resWC = p.cpuUsageSystem(withChildren=True)
assert resWC["OK"] is True
assert resWC["Value"] >= 0
assert resWC["Value"] >= res["Value"]
res = p.cpuUsageSystem()
assert res["OK"] is True
assert res["Value"] > 0
resC = p.cpuUsageSystem(withChildren=True)
assert resC["OK"] is True
assert resC["Value"] > 0
assert resC["Value"] >= res["Value"]
resT = p.cpuUsageSystem(withTerminatedChildren=True)
assert resT["OK"] is True
assert resT["Value"] > 0
assert resT["Value"] >= res["Value"]
resTC = p.cpuUsageSystem(withChildren=True, withTerminatedChildren=True)
assert resTC["OK"] is True
assert resTC["Value"] > 0
assert resTC["Value"] >= res["Value"]
    # After this the main process will no longer exist
mainProcess.wait()
res = p.cpuUsageUser()
assert res["OK"] is False
assert res["Errno"] == 3
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/test/Test_Profiler.py
|
Python
|
gpl-3.0
| 4,254
|
[
"DIRAC"
] |
c4a49a494a06453b8f0b9ce1acbc81669f372a801f7631e156cecbd4fe7540c8
|
#!/usr/bin/env python
## \file tools.py
# \brief file i/o functions
# \author T. Lukaczyk, F. Palacios
# \version 3.2.9 "eagle"
#
# SU2 Lead Developers: Dr. Francisco Palacios (Francisco.D.Palacios@boeing.com).
# Dr. Thomas D. Economon (economon@stanford.edu).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2015 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
# Imports
# -------------------------------------------------------------------
import os, time, sys, pickle, errno, copy
import shutil, glob
from SU2.util import ordered_bunch
# -------------------------------------------------------------------
# Read SU2_DOT Gradient Values
# -------------------------------------------------------------------
def read_gradients( Grad_filename , scale = 1.0):
""" reads the raw gradients from the gradient file
returns a list of floats
"""
# open file and skip first line
gradfile = open(Grad_filename)
gradfile.readline()
# read values
grad_vals = []
for line in gradfile:
line = line.strip()
if len(line) == 0:
break
grad_vals.append(float(line) * scale)
#: for each line
return grad_vals
#: def read_gradients()
# -------------------------------------------------------------------
# Read All Data from a Plot File
# -------------------------------------------------------------------
def read_plot( filename ):
""" reads a plot file
returns an ordered bunch with the headers for keys
and a list of each header's floats for values.
"""
extension = os.path.splitext( filename )[1]
# open history file
plot_file = open(filename)
# title?
line = plot_file.readline()
if line.startswith('TITLE'):
        title = line.split('=')[1].strip()  # not used right now
line = plot_file.readline()
# process header
if '=' in line:
line = line.split("=")[1].strip()
line = line.split(",")
Variables = [ x.strip('" ') for x in line ]
n_Vars = len(Variables)
# initialize plot data dictionary
plot_data = ordered_bunch.fromkeys(Variables)
# must default each value to avoid pointer problems
for key in plot_data.keys(): plot_data[key] = []
# zone list
zones = []
# read all data rows
while 1:
# read line
line = plot_file.readline()
if not line:
break
#zone?
if line.startswith('ZONE'):
zone = line.split('=')[1].strip('" ')
zones.append(zone)
continue
# split line
line_data = line.strip().split(',')
line_data = [ float(x.strip()) for x in line_data ]
# store to dictionary
for i_Var in range(n_Vars):
this_variable = Variables[i_Var]
plot_data[this_variable] = plot_data[this_variable] + [ line_data[i_Var] ]
#: for each line
# check for number of zones
if len(zones) > 1:
        raise IOError('multiple zones not supported')
# done
plot_file.close()
return plot_data
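# Hedged usage sketch (illustration only; the filename and column names are
# assumptions). read_plot() returns an ordered bunch keyed by header name,
# so a single column can be pulled out directly.
def _example_read_plot():
    data = read_plot('history.dat')
    return data['CLift'][-1]    # last value of the CLift column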
# -------------------------------------------------------------------
# Read All Data from History File
# -------------------------------------------------------------------
def read_history( History_filename ):
""" reads a history file
returns an ordered bunch with the history file headers for keys
and a list of each header's floats for values.
if header is an optimization objective, its name is mapped to
the optimization name.
        Iteration and Time(min) headers are mapped to ITERATION and TIME
respectively.
"""
# read plot file
plot_data = read_plot( History_filename )
# initialize history data dictionary
history_data = ordered_bunch()
# header name to config file name map
map_dict = get_headerMap()
# map header names
for key in plot_data.keys():
if map_dict.has_key(key):
var = map_dict[key]
else:
var = key
history_data[var] = plot_data[key]
return history_data
#: def read_history()
# -------------------------------------------------------------------
# Define Dictionary Map for Header Names
# -------------------------------------------------------------------
def get_headerMap():
""" returns a dictionary that maps history file header names
to optimization problem function names
"""
# header name to config file name map
map_dict = { "Iteration" : "ITERATION" ,
"CLift" : "LIFT" ,
"CDrag" : "DRAG" ,
"CSideForce" : "SIDEFORCE" ,
"Cp_Diff" : "INVERSE_DESIGN_PRESSURE" ,
"HeatFlux_Diff" : "INVERSE_DESIGN_HEATFLUX" ,
"HeatFlux_Total" : "TOTAL_HEATFLUX" ,
"HeatFlux_Maximum": "MAXIMUM_HEATFLUX" ,
"CMx" : "MOMENT_X" ,
"CMy" : "MOMENT_Y" ,
"CMz" : "MOMENT_Z" ,
"CFx" : "FORCE_X" ,
"CFy" : "FORCE_Y" ,
"CFz" : "FORCE_Z" ,
"CL/CD" : "EFFICIENCY" ,
"CEff" : "EFFICIENCY" ,
"CFreeSurface" : "FREE_SURFACE" ,
"CMerit" : "FIGURE_OF_MERIT" ,
"CQ" : "TORQUE" ,
"CT" : "THRUST" ,
"CEquivArea" : "EQUIVALENT_AREA" ,
"CNearFieldOF" : "NEARFIELD_PRESSURE" ,
"Avg_TotalPress" : "AVG_TOTAL_PRESSURE" ,
"FluxAvg_Pressure": "AVG_OUTLET_PRESSURE" ,
"MassFlowRate" : "MASS_FLOW_RATE" ,
"Time(min)" : "TIME" }
return map_dict
#: def get_headerMap()
# -------------------------------------------------------------------
# Optimizer Function Names
# -------------------------------------------------------------------
# Aerodynamic Optimizer Function Names
optnames_aero = [ "LIFT" ,
"DRAG" ,
"SIDEFORCE" ,
"MOMENT_X" ,
"MOMENT_Y" ,
"MOMENT_Z" ,
"FORCE_X" ,
"FORCE_Y" ,
"FORCE_Z" ,
"EFFICIENCY" ,
"FREE_SURFACE" ,
"FIGURE_OF_MERIT" ,
"TORQUE" ,
"THRUST" ,
"AVG_TOTAL_PRESSURE" ,
"AVG_OUTLET_PRESSURE" ,
"MASS_FLOW_RATE" ,
"EQUIVALENT_AREA" ,
"NEARFIELD_PRESSURE" ,
"INVERSE_DESIGN_PRESSURE" ,
"INVERSE_DESIGN_HEATFLUX" ,
"TOTAL_HEATFLUX" ,
"MAXIMUM_HEATFLUX" ]
#: optnames_aero
optnames_stab = [ "D_LIFT_D_ALPHA" ,
"D_DRAG_D_ALPHA" ,
"D_SIDEFORCE_D_ALPHA" ,
"D_MOMENT_X_D_ALPHA" ,
"D_MOMENT_Y_D_ALPHA" ,
"D_MOMENT_Z_D_ALPHA" ,
]
# Geometric Optimizer Function Names
optnames_geo = [ "MAX_THICKNESS" ,
"1/4_THICKNESS" ,
"1/3_THICKNESS" ,
"1/2_THICKNESS" ,
"2/3_THICKNESS" ,
"3/4_THICKNESS" ,
"AREA" ,
"AOA" ,
"CHORD" ,
"MAX_THICKNESS_SEC1" ,
"MAX_THICKNESS_SEC2" ,
"MAX_THICKNESS_SEC3" ,
"MAX_THICKNESS_SEC4" ,
"MAX_THICKNESS_SEC5" ,
"1/4_THICKNESS_SEC1" ,
"1/4_THICKNESS_SEC2" ,
"1/4_THICKNESS_SEC3" ,
"1/4_THICKNESS_SEC4" ,
"1/4_THICKNESS_SEC5" ,
"1/3_THICKNESS_SEC1" ,
"1/3_THICKNESS_SEC2" ,
"1/3_THICKNESS_SEC3" ,
"1/3_THICKNESS_SEC4" ,
"1/3_THICKNESS_SEC5" ,
"1/2_THICKNESS_SEC1" ,
"1/2_THICKNESS_SEC2" ,
"1/2_THICKNESS_SEC3" ,
"1/2_THICKNESS_SEC4" ,
"1/2_THICKNESS_SEC5" ,
"2/3_THICKNESS_SEC1" ,
"2/3_THICKNESS_SEC2" ,
"2/3_THICKNESS_SEC3" ,
"2/3_THICKNESS_SEC4" ,
"2/3_THICKNESS_SEC5" ,
"3/4_THICKNESS_SEC1" ,
"3/4_THICKNESS_SEC2" ,
"3/4_THICKNESS_SEC3" ,
"3/4_THICKNESS_SEC4" ,
"3/4_THICKNESS_SEC5" ,
"AREA_SEC1" ,
"AREA_SEC2" ,
"AREA_SEC3" ,
"AREA_SEC4" ,
"AREA_SEC5" ,
"AOA_SEC1" ,
"AOA_SEC2" ,
"AOA_SEC3" ,
"AOA_SEC4" ,
"AOA_SEC5" ,
"CHORD_SEC1" ,
"CHORD_SEC2" ,
"CHORD_SEC3" ,
"CHORD_SEC4" ,
"CHORD_SEC5" ,
"VOLUME" ]
#: optnames_geo
# -------------------------------------------------------------------
# Read Aerodynamic Function Values from History File
# -------------------------------------------------------------------
def read_aerodynamics( History_filename , special_cases=[], final_avg=0 ):
""" values = read_aerodynamics(historyname, special_cases=[])
read aerodynamic function values from history file
Outputs:
            dictionary with function keys and their values
            if special_cases includes 'UNSTEADY_SIMULATION', returns time-averaged data
otherwise returns final value from history file
"""
# read the history data
history_data = read_history(History_filename)
# list of functions to pull
func_names = optnames_aero
# pull only these functions
Func_Values = ordered_bunch()
for this_objfun in func_names:
if history_data.has_key(this_objfun):
Func_Values[this_objfun] = history_data[this_objfun]
# for unsteady cases, average time-accurate objective function values
if 'UNSTEADY_SIMULATION' in special_cases:
for key,value in Func_Values.iteritems():
Func_Values[key] = sum(value)/len(value)
# average the final iterations
elif final_avg:
for key,value in Func_Values.iteritems():
# only the last few iterations
i_fin = min([final_avg,len(value)])
value = value[-i_fin:]
Func_Values[key] = sum(value)/len(value)
# otherwise, keep only last value
else:
for key,value in Func_Values.iteritems():
Func_Values[key] = value[-1]
return Func_Values
#: def read_aerodynamics()
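# Hedged usage sketch (illustration only). Pulls the aerodynamic functions
# from a hypothetical history file and averages the last 10 iterations.
def _example_read_aerodynamics():
    funcs = read_aerodynamics('history.dat', special_cases=[], final_avg=10)
    return funcs['DRAG'], funcs['LIFT']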
# -------------------------------------------------------------------
# Get Objective Function Sign
# -------------------------------------------------------------------
def get_objectiveSign( ObjFun_name ):
""" returns -1 for maximization problems:
LIFT
EFFICIENCY
THRUST
FIGURE_OF_MERIT
returns +1 otherwise
"""
# flip sign for maximization problems
if ObjFun_name == "LIFT" : return -1.0
if ObjFun_name == "EFFICIENCY" : return -1.0
if ObjFun_name == "THRUST" : return -1.0
if ObjFun_name == "FIGURE_OF_MERIT" : return -1.0
# otherwise
return 1.0
#: def get_objectiveSign()
# -------------------------------------------------------------------
# Get Constraint Sign
# -------------------------------------------------------------------
def get_constraintSign( sign ):
""" gets +/-1 given a constraint sign < or > respectively
inequality constraint is posed as c(x) < 0
"""
sign_map = { '>' : -1.0 ,
'<' : +1.0 }
assert not sign=='=' , 'Sign "=" not valid'
return sign_map[sign]
#: def get_constraintSign()
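# Hedged sketch (illustration only) of the convention described above: an
# inequality constraint such as DRAG < 0.05 is evaluated as
# c(x) = sign*(value - bound), so a feasible design gives c(x) < 0.
def _example_constraint_value(value, bound, sign='<'):
    return get_constraintSign(sign) * (value - bound)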
# -------------------------------------------------------------------
# Get Adjoint Filename Suffix
# -------------------------------------------------------------------
def get_adjointSuffix(objective_function=None):
""" gets the adjoint suffix given an objective function """
# adjoint name map
name_map = { "DRAG" : "cd" ,
"LIFT" : "cl" ,
"SIDEFORCE" : "csf" ,
"MOMENT_X" : "cmx" ,
"MOMENT_Y" : "cmy" ,
"MOMENT_Z" : "cmz" ,
"FORCE_X" : "cfx" ,
"FORCE_Y" : "cfy" ,
"FORCE_Z" : "cfz" ,
"EFFICIENCY" : "eff" ,
"INVERSE_DESIGN_PRESSURE" : "invpress" ,
"INVERSE_DESIGN_HEAT" : "invheat" ,
"MAXIMUM_HEATFLUX" : "maxheat" ,
"TOTAL_HEATFLUX" : "totheat" ,
"EQUIVALENT_AREA" : "ea" ,
"NEARFIELD_PRESSURE" : "nfp" ,
"THRUST" : "ct" ,
"TORQUE" : "cq" ,
"FIGURE_OF_MERIT" : "merit" ,
"AVG_TOTAL_PRESSURE" : "pt" ,
"AVG_OUTLET_PRESSURE" : "pe" ,
"MASS_FLOW_RATE" : "mfw" ,
"FREE_SURFACE" : "fs" }
# if none or false, return map
if not objective_function:
return name_map
# return desired objective function suffix
elif name_map.has_key(objective_function):
return name_map[objective_function]
# otherwise...
else:
raise Exception('Unrecognized adjoint function name')
#: def get_adjointSuffix()
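# Hedged usage sketch (illustration only): building an adjoint filename the
# same way restart2solution() does further below.
def _example_adjoint_filename():
    suffix = get_adjointSuffix('DRAG')              # -> 'cd'
    return add_suffix('restart_adj.dat', suffix)    # -> 'restart_adj_cd.dat'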
# -------------------------------------------------------------------
# Add a Suffix
# -------------------------------------------------------------------
def add_suffix(base_name,suffix):
""" suffix_name = add_suffix(base_name,suffix)
adds suffix to a filename, accounting for file type extension
example:
base_name = 'input.txt'
suffix = 'new'
suffix_name = 'input_new.txt'
"""
base_name = os.path.splitext(base_name)
suffix_name = base_name[0] + '_' + suffix + base_name[1]
return suffix_name
#: def add_suffix()
# -------------------------------------------------------------------
# Get Design Variable ID Map
# -------------------------------------------------------------------
def get_dvMap():
""" get dictionary that maps design variable
kind id number to name """
dv_map = { 1 : "HICKS_HENNE" ,
2 : "COSINE_BUMP" ,
3 : "SPHERICAL" ,
4 : "NACA_4DIGITS" ,
5 : "DISPLACEMENT" ,
6 : "ROTATION" ,
7 : "FFD_CONTROL_POINT" ,
8 : "FFD_DIHEDRAL_ANGLE" ,
9 : "FFD_TWIST_ANGLE" ,
10 : "FFD_ROTATION" ,
11 : "FFD_CAMBER" ,
12 : "FFD_THICKNESS" ,
14 : "FOURIER" ,
15 : "FFD_CONTROL_POINT_2D" ,
16 : "FFD_CAMBER_2D" ,
17 : "FFD_THICKNESS_2D" ,
101 : "MACH_NUMBER" ,
102 : "AOA" }
return dv_map
#: def get_dvMap()
# -------------------------------------------------------------------
# Get Design Variable Kind Name from ID
# -------------------------------------------------------------------
def get_dvKind( kindID ):
""" get design variable kind name from id number """
dv_map = get_dvMap()
try:
return dv_map[ kindID ]
except KeyError:
raise Exception('Unrecognized Design Variable ID')
# def get_dvKind()
# -------------------------------------------------------------------
# Get Design Variable Kind ID from Name
# -------------------------------------------------------------------
def get_dvID( kindName ):
""" get design variable kind id number from name """
dv_map = get_dvMap()
id_map = dict((v,k) for (k,v) in dv_map.iteritems())
try:
return id_map[ kindName ]
except KeyError:
        raise Exception('Unrecognized Design Variable Name: %s' % kindName)
#: def get_dvID()
# -------------------------------------------------------------------
# Get Gradient File Header
# -------------------------------------------------------------------
def get_gradFileFormat(grad_type,plot_format,kindID,special_cases=[]):
# start header, build a list of strings and join at the end
header = []
write_format = []
    # handle plot formatting
if plot_format == 'TECPLOT':
header.append('VARIABLES=')
elif plot_format == 'PARAVIEW':
pass
else: raise Exception('output plot format not recognized')
# Case: continuous adjoint
if grad_type == 'CONTINUOUS_ADJOINT':
header.append(r'"iVar","Gradient","FinDiff_Step"')
write_format.append(r'%4d, %.10f, %f')
# Case: finite difference
elif grad_type == 'FINITE_DIFFERENCE':
header.append(r'"iVar","Grad_CLift","Grad_CDrag","Grad_CLDRatio","Grad_CSideForce","Grad_CMx","Grad_CMy","Grad_CMz","Grad_CFx","Grad_CFy","Grad_CFz","Grad_HeatFlux_Total","Grad_HeatFlux_Maximum"')
write_format.append(r'%4d, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f')
for key in special_cases:
if key == "FREE_SURFACE" :
header.append(r',"Grad_CFreeSurface"')
write_format.append(", %.10f ")
if key == "ROTATING_FRAME" :
header.append(r',"Grad_CMerit","Grad_CT","Grad_CQ"')
write_format.append(", %.10f, %.10f, %.10f")
if key == "EQUIV_AREA" :
header.append(r',"Grad_CEquivArea","Grad_CNearFieldOF"')
write_format.append(", %.10f, %.10f")
if key == "1D_OUTPUT" :
header.append(r',"Grad_Avg_TotalPress","Grad_Avg_Mach","Grad_Avg_Temperature","Grad_MassFlowRate","Grad_FluxAvg_Pressure","Grad_FluxAvg_Density","Grad_FluxAvg_Velocity","Grad_FluxAvg_Enthalpy"')
write_format.append(", %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f")
if key == "INV_DESIGN_CP" :
header.append(r',"Grad_Cp_Diff"')
write_format.append(", %.10f")
if key == "INV_DESIGN_HEATFLUX" :
header.append(r',"Grad_HeatFlux_Diff"')
write_format.append(", %.10f")
# otherwise...
else: raise Exception('Unrecognized Gradient Type')
# design variable parameters
if kindID == "FFD_CONTROL_POINT_2D" :
header.append(r',"FFD_Box_ID","xIndex","yIndex","xAxis","yAxis"')
write_format.append(r', %s, %s, %s, %s, %s')
elif kindID == "FFD_CAMBER_2D" :
header.append(r',"FFD_Box_ID","xIndex"')
write_format.append(r', %s, %s')
elif kindID == "FFD_THICKNESS_2D" :
header.append(r',"FFD_Box_ID","xIndex"')
write_format.append(r', %s, %s')
elif kindID == "HICKS_HENNE" :
header.append(r',"Up/Down","Loc_Max"')
write_format.append(r', %s, %s')
elif kindID == "GAUSS_BUMP" :
header.append(r',"Up/Down","Loc_Max","Size_Bump"')
write_format.append(r', %s, %s, %s')
elif kindID == "FAIRING" :
header.append(r',"ControlPoint_Index","Theta_Disp","R_Disp"')
write_format.append(r', %s, %s, %s')
elif kindID == "NACA_4DIGITS" :
header.append(r',"1st_digit","2nd_digit","3rd&4th_digits"')
write_format.append(r', %s, %s, %s')
elif kindID == "DISPLACEMENT" :
header.append(r',"x_Disp","y_Disp","z_Disp"')
write_format.append(r', %s, %s, %s')
elif kindID == "ROTATION" :
header.append(r',"x_Orig","y_Orig","z_Orig","x_End","y_End","z_End"')
write_format.append(r', %s, %s, %s, %s, %s, %s')
elif kindID == "FFD_CONTROL_POINT" :
header.append(r',"FFD_Box_ID","xIndex","yIndex","zIndex","xAxis","yAxis","zAxis"')
write_format.append(r', %s, %s, %s, %s, %s, %s, %s')
elif kindID == "FFD_DIHEDRAL_ANGLE" :
header.append(r',"FFD_Box_ID","x_Orig","y_Orig","z_Orig","x_End","y_End","z_End"')
write_format.append(r', %s, %s, %s, %s, %s, %s, %s')
elif kindID == "FFD_TWIST_ANGLE" :
header.append(r',"FFD_Box_ID","x_Orig","y_Orig","z_Orig","x_End","y_End","z_End"')
write_format.append(r', %s, %s, %s, %s, %s, %s, %s')
elif kindID == "FFD_ROTATION" :
header.append(r',"FFD_Box_ID","x_Orig","y_Orig","z_Orig","x_End","y_End","z_End"')
write_format.append(r', %s, %s, %s, %s, %s, %s, %s')
elif kindID == "FFD_CAMBER" :
header.append(r',"FFD_Box_ID","xIndex","yIndex"')
write_format.append(r', %s, %s, %s')
elif kindID == "FFD_THICKNESS" :
header.append(r',"FFD_Box_ID","xIndex","yIndex"')
write_format.append(r', %s, %s, %s')
elif kindID == "MACH_NUMBER" : pass
elif kindID == "AOA" : pass
# otherwise...
else: raise Exception('Unrecognized Design Variable Kind')
# finite difference step
if grad_type == 'FINITE_DIFFERENCE':
header.append(r',"FinDiff_Step"')
write_format.append(r', %.10f')
# finish format
header.append('\n')
write_format.append('\n')
header = ''.join(header)
write_format = ''.join(write_format)
return [header,write_format]
#: def get_gradFileFormat()
# -------------------------------------------------------------------
# Get Optimization File Header
# -------------------------------------------------------------------
def get_optFileFormat(plot_format,special_cases=None):
if special_cases is None: special_cases = []
# start header, build a list of strings and join at the end
header_list = []
header_format = ''
write_format = []
    # handle plot formatting
if plot_format == 'TECPLOT':
header_format = header_format + 'VARIABLES='
elif plot_format == 'PARAVIEW':
pass
else: raise Exception('output plot format not recognized')
# start header
header_list.extend(["Iteration","CLift","CDrag","CSideForce","CMx","CMy","CMz","CFx","CFy","CFz","CEff","HeatFlux_Total","HeatFlux_Maximum"])
write_format.append(r'%4d, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f')
# special cases
for key in special_cases:
if key == "FREE_SURFACE" :
header_list.extend(["CFreeSurface"])
write_format.append(r', %.10f ')
if key == "ROTATING_FRAME" :
header_list.extend(["CMerit","CT","CQ"])
write_format.append(r', %.10f, %.10f, %.10f')
if key == "EQUIV_AREA" :
header_list.extend(["CEquivArea","CNearFieldOF"])
write_format.append(r', %.10f, %.10f')
if key == "1D_OUTPUT":
header_list.extend(["Avg_TotalPress","Avg_Mach","Avg_Temperature","MassFlowRate","FluxAvg_Pressure","FluxAvg_Density","FluxAvg_Velocity","FluxAvg_Enthalpy"])
write_format.append(r', %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f')
if key == "INV_DESIGN_CP" :
header_list.extend(["Cp_Diff"])
write_format.append(r', %.10f')
if key == "INV_DESIGN_HEATFLUX" :
header_list.extend(["HeatFlux_Diff"])
write_format.append(r', %.10f')
# finish formats
header_format = (header_format) + ('"') + ('","').join(header_list) + ('"') + (' \n')
write_format = ''.join(write_format) + ' \n'
# build list of objective function names
header_vars = []
map_dict = get_headerMap()
for variable in header_list:
assert map_dict.has_key(variable) , 'unrecognized header variable'
header_vars.append(map_dict[variable])
# done
return [header_format,header_vars,write_format]
#: def get_optFileFormat()
# -------------------------------------------------------------------
# Get Extension Name
# -------------------------------------------------------------------
def get_extension(output_format):
if (output_format == "PARAVIEW") : return ".csv"
if (output_format == "TECPLOT") : return ".dat"
if (output_format == "TECPLOT_BINARY") : return ".plt"
if (output_format == "SOLUTION") : return ".dat"
if (output_format == "RESTART") : return ".dat"
if (output_format == "CONFIG") : return ".cfg"
# otherwise
raise Exception("Output Format Unknown")
#: def get_extension()
# -------------------------------------------------------------------
# Check Special Case
# -------------------------------------------------------------------
def get_specialCases(config):
""" returns a list of special physical problems that were
specified in the config file, and set to 'yes'
"""
all_special_cases = [ 'FREE_SURFACE' ,
'ROTATING_FRAME' ,
'EQUIV_AREA' ,
'1D_OUTPUT' ,
'INV_DESIGN_CP' ,
'INV_DESIGN_HEATFLUX' ]
special_cases = []
for key in all_special_cases:
if config.has_key(key) and config[key] == 'YES':
special_cases.append(key)
if config.has_key('PHYSICAL_PROBLEM') and config['PHYSICAL_PROBLEM'] == key:
special_cases.append(key)
if config.get('UNSTEADY_SIMULATION','NO') != 'NO':
special_cases.append('UNSTEADY_SIMULATION')
# no support for more than one special case
if len(special_cases) > 1:
error_str = 'Currently cannot support ' + ' and '.join(special_cases) + ' at once'
raise Exception(error_str)
if (config['WRT_SOL_FREQ'] != 1) and ('WRT_UNSTEADY' in special_cases):
raise Exception('Must set WRT_SOL_FREQ= 1 for WRT_UNSTEADY= YES')
# Special case for time-spectral
if config.has_key('UNSTEADY_SIMULATION') and config['UNSTEADY_SIMULATION'] == 'TIME_SPECTRAL':
special_cases.append('TIME_SPECTRAL')
# Special case for rotating frame
if config.has_key('GRID_MOVEMENT_KIND') and config['GRID_MOVEMENT_KIND'] == 'ROTATING_FRAME':
special_cases.append('ROTATING_FRAME')
return special_cases
#: def get_specialCases()
def next_folder(folder_format,num_format='%03d'):
""" folder = next_folder(folder_format,num_format='%03d')
finds the next folder with given format
Inputs:
folder_format - folder name with wild card (*) to mark expansion
num_format - %d formating to expand the wild card with
Outputs:
folder - a folder with the next index number inserted in
the wild card, first index is 1
"""
assert '*' in folder_format , 'wildcard (*) missing in folder_format name'
folders = glob.glob(folder_format)
split = folder_format.split('*')
folder = folder_format.replace('*',num_format)
if folders:
# find folder number, could be done with regex...
max_folder = max(folders)
if split[0]:
max_folder = max_folder.split(split[0])[1]
if split[1]:
max_folder = max_folder.rsplit(split[1])[0]
# last folder number
max_i = int(max_folder)
# increment folder number
folder = folder % (max_i+1)
else:
# first folder, number 1
folder = folder % 1
return folder
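# Hedged usage sketch (illustration only): with folders DESIGNS/DSN_001 and
# DESIGNS/DSN_002 already present, the call below returns 'DESIGNS/DSN_003'.
def _example_next_folder():
    return next_folder('DESIGNS/DSN_*')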
def expand_part(name,config):
names = [name]
return names
def expand_time(name,config):
if 'UNSTEADY_SIMULATION' in get_specialCases(config):
n_time = config['UNST_ADJOINT_ITER']
name_pat = add_suffix(name,'%05d')
names = [name_pat%i for i in range(n_time)]
else:
names = [name]
return names
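# Hedged sketch (illustration only): for an unsteady adjoint with
# UNST_ADJOINT_ITER = 3, the direct solution name expands to one file per
# physical time step, e.g.
#   expand_time('solution_flow.dat', config)
#   -> ['solution_flow_00000.dat', 'solution_flow_00001.dat',
#       'solution_flow_00002.dat']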
def make_link(src,dst):
""" make_link(src,dst)
makes a relative link
Inputs:
src - source file
dst - destination to place link
Windows links currently unsupported, will copy file instead
"""
assert os.path.exists(src) , 'source file does not exist \n%s' % src
if os.name == 'nt':
# can't make a link in windows, need to look for other options
if os.path.exists(dst): os.remove(dst)
shutil.copy(src,dst)
else:
        # find real file, in case source itself is a link
src = os.path.realpath(src)
# normalize paths
src = os.path.normpath(src)
dst = os.path.normpath(dst)
# check for self referencing
if src == dst: return
# find relative folder path
srcfolder = os.path.join( os.path.split(src)[0] ) + '/'
dstfolder = os.path.join( os.path.split(dst)[0] ) + '/'
srcfolder = os.path.relpath(srcfolder,dstfolder)
src = os.path.join( srcfolder, os.path.split(src)[1] )
# make unix link
if os.path.exists(dst): os.remove(dst)
os.symlink(src,dst)
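# Hedged usage sketch (illustration only): creates a relative link (or a copy
# on Windows) at DESIGNS/DSN_001/mesh.su2 pointing back at ./mesh.su2; the
# file and folder names are assumptions.
def _example_make_link():
    make_link('mesh.su2', 'DESIGNS/DSN_001/mesh.su2')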
def restart2solution(config,state={}):
""" restart2solution(config,state={})
moves restart file to solution file,
optionally updates state
direct or adjoint is read from config
adjoint objective is read from config
"""
# direct solution
if config.MATH_PROBLEM == 'DIRECT':
restart = config.RESTART_FLOW_FILENAME
solution = config.SOLUTION_FLOW_FILENAME
# expand unsteady time
restarts = expand_time(restart,config)
solutions = expand_time(solution,config)
# move
for res,sol in zip(restarts,solutions):
shutil.move( res , sol )
# update state
if state: state.FILES.DIRECT = solution
# adjoint solution
elif config.MATH_PROBLEM == 'ADJOINT':
restart = config.RESTART_ADJ_FILENAME
solution = config.SOLUTION_ADJ_FILENAME
# add suffix
func_name = config.OBJECTIVE_FUNCTION
suffix = get_adjointSuffix(func_name)
restart = add_suffix(restart,suffix)
solution = add_suffix(solution,suffix)
# expand unsteady time
restarts = expand_time(restart,config)
solutions = expand_time(solution,config)
# move
for res,sol in zip(restarts,solutions):
shutil.move( res , sol )
        # update state
ADJ_NAME = 'ADJOINT_' + func_name
if state: state.FILES[ADJ_NAME] = solution
else:
        raise Exception('unknown math problem')
|
chenbojian/SU2
|
SU2_PY/SU2/io/tools.py
|
Python
|
lgpl-2.1
| 33,910
|
[
"ParaView"
] |
9f1e4b172dfa39adef7f3f9763f050de85c7ccb9be11fb660faf5e71543a16cf
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an XRD pattern calculator.
"""
import json
import os
from math import asin, cos, degrees, pi, radians, sin
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from .core import (
AbstractDiffractionPatternCalculator,
DiffractionPattern,
get_unique_families,
)
# XRD wavelengths in angstroms
WAVELENGTHS = {
"CuKa": 1.54184,
"CuKa2": 1.54439,
"CuKa1": 1.54056,
"CuKb1": 1.39222,
"MoKa": 0.71073,
"MoKa2": 0.71359,
"MoKa1": 0.70930,
"MoKb1": 0.63229,
"CrKa": 2.29100,
"CrKa2": 2.29361,
"CrKa1": 2.28970,
"CrKb1": 2.08487,
"FeKa": 1.93735,
"FeKa2": 1.93998,
"FeKa1": 1.93604,
"FeKb1": 1.75661,
"CoKa": 1.79026,
"CoKa2": 1.79285,
"CoKa1": 1.78896,
"CoKb1": 1.63079,
"AgKa": 0.560885,
"AgKa2": 0.563813,
"AgKa1": 0.559421,
"AgKb1": 0.497082,
}
with open(os.path.join(os.path.dirname(__file__), "atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(AbstractDiffractionPatternCalculator):
r"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent facets. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \\times s^2 \\times \\sum\\limits_{i=1}^n a_i \
\\exp(-b_is^2)
where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i \\mathbf{g_{hkl}}
\\cdot \\mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl}F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
{\\sin^2(\\theta)\\cos(\\theta)}
"""
# Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e, Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, (float, int)):
self.wavelength = wavelength
elif isinstance(wavelength, str):
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
else:
raise TypeError("'wavelength' must be either of: float, int or str")
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the diffraction pattern for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRDPattern)
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (
(0, 2 / wavelength)
if two_theta_range is None
else [2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
)
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError(
"Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol
)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind, _ in sorted(recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
# Force miller indices to be integers.
hkl = [int(round(i)) for i in hkl]
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
# Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r) * dw_correction)
# Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / (sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
# Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], -hkl[0] - hkl[1], hkl[2])
# Deal with floating point precision issues.
ind = np.where(
np.abs(np.subtract(two_thetas, two_theta)) < AbstractDiffractionPatternCalculator.TWO_THETA_TOL
)
if len(ind[0]) > 0:
peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)], d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
x = []
y = []
hkls = []
d_hkls = []
for k in sorted(peaks.keys()):
v = peaks[k]
fam = get_unique_families(v[1])
if v[0] / max_intensity * 100 > AbstractDiffractionPatternCalculator.SCALED_INTENSITY_TOL:
x.append(k)
y.append(v[0])
hkls.append([{"hkl": hkl, "multiplicity": mult} for hkl, mult in fam.items()])
d_hkls.append(v[2])
xrd = DiffractionPattern(x, y, hkls, d_hkls)
if scaled:
xrd.normalize(mode="max", value=100)
return xrd
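# Hedged usage sketch (added for illustration; not part of pymatgen itself).
# 'structure' is assumed to be any pymatgen Structure instance, e.g. one
# parsed from a CIF file elsewhere.
def _example_xrd_pattern(structure):
    calc = XRDCalculator(wavelength="CuKa")
    pattern = calc.get_pattern(structure, two_theta_range=(10, 80))
    return list(zip(pattern.x, pattern.y))  # (two-theta, scaled intensity) pairs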
|
gmatteo/pymatgen
|
pymatgen/analysis/diffraction/xrd.py
|
Python
|
mit
| 11,153
|
[
"CRYSTAL",
"pymatgen"
] |
0a197cffc7acec394433b818b25c569ed97a103e0e197ac872297639158b206d
|
# the inputs list tells MCEdit what kind of options to present to the user.
# each item is a (name, value) pair. name is a text string acting
# both as a text label for the input on-screen and a key for the 'options'
# parameter to perform(). value and its type indicate allowable and
# default values for the option:
# True or False: creates a checkbox with the given value as default
# int or float value: creates a value input with the given value as default
# int values create fields that only accept integers.
# tuple of numbers: a tuple of ints or floats creates a value input with minimum and
# maximum values. a 2-tuple specifies (min, max) with min as default.
# a 3-tuple specifies (default, min, max)
# tuple of strings: a tuple of strings creates a popup menu whose entries are
# labeled with the given strings. the first item in the tuple is selected
# by default. returns one of the strings in the tuple.
# "blocktype" as a string: creates a button the user can click to choose
# a block type in a list. returns a Block object. the object has 'ID'
# and 'blockData' attributes.
# this dictionary creates an integer input with range (-128, 128) and default 4,
# a blocktype picker, a floating-point input with no limits and default 15.0,
# a checkbox initially checked, and a menu of choices
inputs = (
("Depth", (4, -128, 128)),
("Pick a block:", "blocktype"),
("Fractal complexity", 15.0),
("Enable thrusters", True),
("Access method", ("Use blockAt", "Use temp schematic", "Use chunk slices")),
)
# perform() is the main entry point of a filter. Its parameters are
# a MCLevel instance, a BoundingBox, and an options dictionary.
# The options dictionary will have keys corresponding to the keys specified above,
# and values reflecting the user's input.
# you get undo for free: everything within 'box' is copied to a temporary buffer
# before perform is called, and then copied back when the user asks to undo
def perform(level, box, options):
blockType = options["Pick a block:"].ID
complexity = options["Fractal complexity"]
if options["Enable thrusters"]:
# Errors will alert the user and print a traceback to the console.
raise NotImplementedError("Thrusters not attached!")
method = options["Access method"]
# There are a few general ways of accessing a level's blocks
# The first is using level.blockAt and level.setBlockAt
# These are slower than the other two methods, but easier to start using
if method == "Use blockAt":
for x in xrange(box.minx, box.maxx):
for z in xrange(box.minz, box.maxz):
for y in xrange(box.miny, box.maxy): # nested loops can be slow
# replaces gold with TNT. straightforward.
if level.blockAt(x, y, z) == 14:
level.setBlockAt(x, y, z, 46)
# The second is to extract the segment of interest into a contiguous array
# using level.extractSchematic. this simplifies using numpy but at the cost
# of the temporary buffer and the risk of a memory error on 32-bit systems.
if method == "Use temp schematic":
temp = level.extractSchematic(box)
# remove any entities in the temp. this is an ugly move
# because copyBlocksFrom actually copies blocks, entities, everything
temp.removeEntitiesInBox(temp.bounds)
temp.removeTileEntitiesInBox(temp.bounds)
# replaces gold with TNT.
# the expression in [] creates a temporary the same size, using more memory
temp.Blocks[temp.Blocks == 14] = 46
level.copyBlocksFrom(temp, temp.bounds, box.origin)
# The third method iterates over each subslice of every chunk in the area
# using level.getChunkSlices. this method is a bit arcane, but lets you
# visit the affected area chunk by chunk without using too much memory.
if method == "Use chunk slices":
for (chunk, slices, point) in level.getChunkSlices(box):
# chunk is an AnvilChunk object with attributes:
# Blocks, Data, Entities, and TileEntities
# Blocks and Data can be indexed using slices:
blocks = chunk.Blocks[slices]
# blocks now contains a "view" on the part of the chunk's blocks
# that lie in the selection. This "view" is a numpy object that
# accesses only a subsection of the original array, without copying
# once again, gold into TNT
blocks[blocks == 14] = 46
# notify the world that the chunk changed
# this gives finer control over which chunks are dirtied
# you can call chunk.chunkChanged(False) if you want to dirty it
# but not run the lighting calc later.
chunk.chunkChanged()
# You can also access the level any way you want
# Beware though, you only get to undo the area within the specified box
pos = level.getPlayerPosition()
cpos = pos[0] >> 4, pos[2] >> 4
chunk = level.getChunk(*cpos)
chunk.Blocks[::4, ::4, :64] = 46 # replace every 4x4th column of land with TNT
|
mcedit/mcedit
|
filters/demo/filterdemo.py
|
Python
|
isc
| 5,188
|
[
"VisIt"
] |
1040e363d9509118f675abdb13c0be1fd36f7836661616ee43e0cf0e02dd7ba3
|
#! /usr/bin/python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.8+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
modify your __init__.py to define __version__ (by calling a function
from _version.py)
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
LONG_VERSION_PY = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.8+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded variables.
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
except NameError:
return default
return (versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
import os.path
import sys
# os.path.relpath only appeared in Python-2.6. Define it here for 2.5.
def os_path_relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def do_vcs_install(versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [versionfile_source, ipy]
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os_path_relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.8+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
f.close()
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print("set %s to '%s'" % (filename, versions["version"]))
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
try:
root = os.path.dirname(os.path.abspath(__file__))
except NameError:
root = os.path.dirname(os.path.abspath(sys.argv[0]))
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_abs)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print("got version from expanded variable %s" % ver)
return ver
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
ver = versions_from_vcs(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from git %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % ver)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print(" creating %s" % versionfile_source)
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print(" %s unmodified" % ipy)
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
|
tvd-dataset/TheBigBangTheory
|
versioneer.py
|
Python
|
mit
| 25,132
|
[
"Brian"
] |
c82471d0f49942515f6ea66aa35117113cd56a9df141a1050ed5ffbd247da5ce
|
#
# io - Data input and output
#
from info import __doc__
from numpy import deprecate
# matfile read and write
from matlab import loadmat, savemat
# netCDF file support
from netcdf import netcdf_file, netcdf_variable
from matlab import byteordercodes
from data_store import save_as_module
from mmio import mminfo, mmread, mmwrite
from idl import readsav
__all__ = filter(lambda s:not s.startswith('_'),dir())
from numpy.testing import Tester
test = Tester().test
|
scipy/scipy-svn
|
scipy/io/__init__.py
|
Python
|
bsd-3-clause
| 470
|
[
"NetCDF"
] |
f5418f61d838c23120537a12baf6e106f2ec8b45ddd7274c57e390ab9a350082
|
# Downloaded from http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
"""
Tarjan's algorithm and topological sorting implementation in Python
by Paul Harrison
Public domain, do with it as you will
"""
def strongly_connected_components(graph):
""" Find the strongly connected components in a graph using
Tarjan's algorithm.
graph should be a dictionary mapping node names to
lists of successor nodes.
"""
result = [ ]
stack = [ ]
low = { }
def visit(node):
if node in low: return
if node not in graph: graph[node] = []
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)
for successor in graph[node]:
visit(successor)
low[node] = min(low[node], low[successor])
if num == low[node]:
component = tuple(stack[stack_pos:])
del stack[stack_pos:]
result.append(component)
for item in component:
low[item] = len(graph)
for node in dict(graph):
visit(node)
return result
def topological_sort(graph):
count = { }
for node in graph:
count[node] = 0
for node in graph:
for successor in graph[node]:
count[successor] += 1
ready = [ node for node in graph if count[node] == 0 ]
result = [ ]
while ready:
node = ready.pop(-1)
result.append(node)
for successor in graph[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
return result
def robust_topological_sort(graph):
""" First identify strongly connected components,
then perform a topological sort on these components. """
components = strongly_connected_components(graph)
node_component = { }
for component in components:
for node in component:
node_component[node] = component
component_graph = { }
for component in components:
component_graph[component] = [ ]
for node in graph:
node_c = node_component[node]
for successor in graph[node]:
successor_c = node_component[successor]
if node_c != successor_c:
component_graph[node_c].append(successor_c)
return topological_sort(component_graph)
if __name__ == '__main__':
d = {
0 : [1],
1 : [2],
2 : [1,3],
3 : [3],
}
#print d
#print robust_topological_sort(d)
d = {0 : [1, 2, 4], 1 : [3, 4], 2 : [0, 3], 3 : [], 4: [1]}
print(d)
print("scc", strongly_connected_components(d))
print("rts", robust_topological_sort(d))
|
joxeankoret/diaphora
|
others/tarjan_sort.py
|
Python
|
agpl-3.0
| 2,807
|
[
"VisIt"
] |
3d24ca359cc900d317a7efac392da2b43c407fb3b559fe92c451aaad4ad18e47
|
"""User preferences for KlustaViewa."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import logging
import numpy as np
# -----------------------------------------------------------------------------
# Logging
# -----------------------------------------------------------------------------
# Console logging level, can be DEBUG, INFO or WARNING.
loglevel = logging.INFO
# Level of the logging file. DEBUG, INFO or WARNING, or just None to disable.
loglevel_file = logging.INFO
# -----------------------------------------------------------------------------
# Main window
# -----------------------------------------------------------------------------
# Should the software ask the user to save upon closing?
prompt_save_on_exit = True
delay_timer = .05
delay_buffer = .1
# -----------------------------------------------------------------------------
# Similarity matrix
# -----------------------------------------------------------------------------
similarity_measure = 'gaussian' # or 'kl' for KL divergence
# -----------------------------------------------------------------------------
# Waveform view
# -----------------------------------------------------------------------------
# Approximate maximum number of spikes per cluster to show. Should be
# about 100 for low-end graphics cards, 1000 for high-end ones.
waveforms_nspikes_max_expected = 100
# The minimum number of spikes per cluster to display.
waveforms_nspikes_per_cluster_min = 10
# -----------------------------------------------------------------------------
# Feature view
# -----------------------------------------------------------------------------
# Opacity value of the background spikes.
feature_background_alpha = .25
# Opacity value of the spikes in the selected clusters.
feature_selected_alpha = .75
# Number of spikes to show in the background.
features_nspikes_background_max = 10000
# Maximum number of spikes per cluster to show.
features_nspikes_per_cluster_max = 1000
# Unit of the spike time in the feature view. Can be 'samples' or 'second'.
features_info_time_unit = 'second'
# -----------------------------------------------------------------------------
# Correlograms view
# -----------------------------------------------------------------------------
# Maximum number of clusters to show in the correlograms view.
correlograms_max_nclusters = 20
correlograms_nexcerpts = 100
correlograms_excerpt_size = 20000
# -----------------------------------------------------------------------------
# IPython import path
# -----------------------------------------------------------------------------
# Paths where all .py files are loaded in IPython view.
# "~" corresponds to the user home path, C:\Users\Username\ on Windows,
# /home/username/ on Linux, etc.
ipython_import_paths = ['~/.kwiklib/code']
# -----------------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------------
# Delay between two successive automatic operations in unit tests for views.
test_operator_delay = .1
# Whether to automatically close the views during unit testing.
test_auto_close = True
|
DavidTingley/ephys-processing-pipeline
|
installation/klustaviewa-0.3.0/kwiklib/utils/preferences_default.py
|
Python
|
gpl-3.0
| 3,402
|
[
"Gaussian"
] |
4fb6fdf0b9217cf44912a501065f570336f71131bc63d94fb65b6af88e5af2de
|
from ase import *
from hotbit import *
from ase.data.molecules import molecule
atoms=molecule('C6H6')
atoms.set_pbc(False)
atoms.center(vacuum=3)
calc=Hotbit(SCC=True, txt='benzene.cal')
atoms.set_calculator(calc)
traj=PickleTrajectory('quench.traj','w',atoms=atoms)
qn=QuasiNewton(atoms)
qn.attach(traj.write)
qn.run()
|
pekkosk/hotbit
|
hotbit/doc/examples/benzene.py
|
Python
|
gpl-2.0
| 322
|
[
"ASE"
] |
45f3a8a846b7c442b0b1cbf65d09873af278ff9a92f5bb7a72c24e03488b8ff6
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap
class AsteriskDeviceMap(SnmpPlugin):
"""Map mib elements from Asterisk mib to get hw and os products.
"""
maptype = "AsteriskDeviceMap"
snmpGetMap = GetMap({
#'' : 'manufacturer',
'.1.3.6.1.2.1.1.1.0' : 'setHWProductKey',
#'.1.3.6.1.4.1.272.4.1.31.0' : 'setHWSerialNumber',
'.1.3.6.1.4.1.22736.1.1.1.0': 'setOSProductKey',
})
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
if getdata['setHWProductKey'] is None: return None
om = self.objectMap(getdata)
return om
|
zenoss/Community-Zenpacks
|
ZenPacks.AndreaConsadori.Asterisk16/ZenPacks/AndreaConsadori/Asterisk16/modeler/plugins/AsteriskDeviceMap.py
|
Python
|
gpl-2.0
| 1,317
|
[
"VisIt"
] |
63fb3711d651a6536272f9c15eb3500f4b375e836081c3032e9d5937791c97b4
|
"""
Fit the mass-ratio using emcee
"""
import os
import numpy as np
import emcee
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import gridspec
from matplotlib.ticker import FuncFormatter
import pandas as pd
from CombineCCFs import get_rv
sns.set_context('paper', font_scale=2.0)
# Functions for emcee
def lnlike_partial(pars, v, v_err, rv1_pred):
q, dv, lnf = pars
rv2_pred = -rv1_pred / q
inv_sigma2 = 1.0/(np.exp(lnf)*v_err**2)
return -0.5*np.nansum((rv2_pred - (v+rv1_pred-dv))**2 * inv_sigma2 - np.log(inv_sigma2))
def lnprior_partial(pars):
q, dv, lnf = pars
if 0 < q < 1 and -20 < dv < 20:
return 0.0
return -np.inf
def lnprob_partial(pars, v, v_err, rv1_pred):
lp = lnprior_partial(pars)
return lp + lnlike_partial(pars, v, v_err, rv1_pred) if np.isfinite(lp) else -np.inf
# Function to get the chains given a set of parameters for the primary fit
def fit_partial(T0, P, e, K1, w, t, rv2, rv2_err):
"""
Get MCMC samples for the mass-ratio, velocity shift, and error scaling
for the orbital parameters T0-w (fit by Stefano Meschiari)
"""
# Get the predicted velocity of the primary at each time
rv1_pred = get_rv(T0=T0, P=P, e=e, K1=K1, w=w*np.pi/180, t=t)
# Initialize MCMC sampler
initial_pars = [0.47, -5.4, -3.6]
ndim = len(initial_pars)
nwalkers = 100
p0 = emcee.utils.sample_ball(initial_pars, std=[1e-6]*ndim, size=nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_partial, args=(rv2, rv2_err, rv1_pred), threads=2)
# Run the sampler
pos, lp, state = sampler.run_mcmc(p0, 1000)
# Save the last 500 (we will just have to hope that the sampler sufficiently burns-in in 500 steps. That is true in tests I've done)
samples = sampler.chain[:, 500:, :].reshape((-1, ndim))
return samples
def lnlike_full(pars, t1, v1, v1_err, t2, v2, v2_err):
K1, K2, P, T0, w, e, dv1, dv2, gamma, lnf, noise1 = pars
rv1_pred = get_rv(T0=T0, P=P, e=e, K1=K1, w=w, t=t1) + dv1 + gamma
rv2_pred = -get_rv(T0=T0, P=P, e=e, K1=K2, w=w, t=t2)
inv_sigma2_1 = 1.0/(v1_err**2 + np.exp(noise1)**2)
inv_sigma2_2 = 1.0/(np.exp(lnf)*v2_err**2)
s1 = np.nansum((rv1_pred - v1)**2 * inv_sigma2_1 - np.log(inv_sigma2_1/(2*np.pi)))
s2 = np.nansum((rv2_pred - (v2 - rv2_pred*K1/K2 - dv2 - gamma))**2 * inv_sigma2_2 - np.log(inv_sigma2_2/(2*np.pi)))
# s2 = np.nansum((rv2_pred - (v2 - dv2))**2 * inv_sigma2_2 - np.log(inv_sigma2_2/(2*np.pi)))
#print(s1)
#print(s2)
return -0.5*(s1+s2)
#return -0.5*np.nansum((rv2_pred[first:] - (v[first:]+rv1_pred[first:]-dv))**2 * inv_sigma2 - np.log(inv_sigma2))
def lnprior_full(pars):
    """Flat (box) prior on the orbital parameters; a Gaussian alternative is commented out below.
    """
K1, K2, P, T0, w, e, dv1, dv2, gamma, lnf, noise1 = pars
#if 4 < K1 < 6 and K2 > K1 and 0.6 < e < 0.7 and 6000 < P < 8500 and 0.35 < w < 0.7 and -20 < dv1 < 20 and -20 < dv2 < 20 and lnf < 0:
if 3 < K1 < 7 and K2 > K1 and 0.2 < e < 1. and 5000 < P < 9000 and 0.15 < w < 0.9 and -20 < dv1 < 20 and -20 < dv2 < 20 and -20 < gamma < 20 and lnf < 0:
return 0.0
#if K2 > K1 and -20 < dv1 < 20 and -20 < dv2 < 20 and lnf < 0:
# return -0.5*((K1-5.113)**2/0.1**2 + (P-7345)**2/1000**2 + (T0-2449824)**2/1000**2 +
# (w-0.506)**2/0.035**2 + (e-0.669)**2/0.016**2 +
# np.log(2*np.pi*(0.1**2 + 1000**2 + 1000**2 + 0.035**2 + 0.016**2)))
return -np.inf
def lnprob_full(pars, t1, v1, v1_err, t2, v2, v2_err):
lp = lnprior_full(pars)
return lp + lnlike_full(pars, t1, v1, v1_err, t2, v2, v2_err) if np.isfinite(lp) else -np.inf
def full_sb2_fit(t1, rv1, rv1_err, t2, rv2, rv2_err, Niter=1000):
"""
Do a full SB2 fit.
"""
#initial_pars = [5.113, 5.113/0.469, 7345, 2449824, 29*np.pi/180., 0.669, 4.018, -5.38, -3.61, -1.0]
initial_pars = [5.181, 10.786, 6763, 2450400, 32.7*np.pi/180., .679, 4.102, -5.47, 0.0, -3.25, -2.6]
ndim = len(initial_pars)
nwalkers = 300
p0 = emcee.utils.sample_ball(initial_pars, std=[1e-6]*ndim, size=nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_full, args=(t1, rv1, rv1_err, t2, rv2, rv2_err), threads=2)
# Run for a while
for i, result in enumerate(sampler.sample(p0, iterations=Niter)):
if i%10 == 0:
print('Done with iteration {:03d}'.format(i))
return sampler
"""
# Get the best chain position and resample from there
pos, lnp, state = result
best_pars = pos[np.argmax(lnp)]
p1 = emcee.utils.sample_ball(best_pars, std=[1e-6]*ndim, size=nwalkers)
# Run again (this is the production run)
sampler.reset()
for i, result in enumerate(sampler.sample(p1, iterations=2000)):
if i%10 == 0:
print('Done with production iteration {:03d}'.format(i))
return sampler
"""
def lnlike_sb1(pars, t1, v1, v1_err):
K1, P, T0, w, e, dv1 = pars
rv1_pred = get_rv(T0=T0, P=P, e=e, K1=K1, w=w, t=t1) + dv1
inv_sigma2_1 = 1.0/v1_err**2
s1 = np.nansum((rv1_pred - v1)**2 * inv_sigma2_1 - np.log(inv_sigma2_1))
return -0.5*s1
def lnprior_sb1(pars):
    """Flat (box) prior on the SB1 orbital parameters.
    """
K1, P, T0, w, e, dv1 = pars
#if 3 < K1 < 7 and K2 > K1 and 0.6 < e < 0.7 and 6000 < P < 8500 and 0.35 < w < 0.7 and -20 < dv1 < 20 and -20 < dv2 < 20 and lnf < 0:
if 3 < K1 < 7 and 0.2 < e < 1. and 5000 < P < 9000 and 0.15 < w < 0.9 and -20 < dv1 < 20:
return 0.0
return -np.inf
def lnprob_sb1(pars, t1, v1, v1_err):
lp = lnprior_sb1(pars)
return lp + lnlike_sb1(pars, t1, v1, v1_err) if np.isfinite(lp) else -np.inf
def sb1_fit(t1, rv1, rv1_err):
"""
Fit the primary star rvs only. This should be consistent with Stefano's fit.
"""
initial_pars = [5.113, 7345, 2449824, 29*np.pi/180., 0.669, 4.018]
ndim = len(initial_pars)
nwalkers = 300
p0 = emcee.utils.sample_ball(initial_pars, std=[1e-6]*ndim, size=nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_sb1, args=(t1, rv1, rv1_err), threads=2)
# Run for a while
for i, result in enumerate(sampler.sample(p0, iterations=1000)):
if i%10 == 0:
print('Done with burn-in iteration {:03d}'.format(i))
return sampler
"""
# Get the best chain position and resample from there
pos, lnp, state = result
best_pars = pos[np.argmax(lnp)]
p1 = emcee.utils.sample_ball(best_pars, std=[1e-6]*ndim, size=nwalkers)
# Run again (this is the production run)
sampler.reset()
for i, result in enumerate(sampler.sample(p1, iterations=2000)):
if i%10 == 0:
print('Done with production iteration {:03d}'.format(i))
return sampler
"""
def plot(pars, t1, v1, v1_err, t2, v2, v2_err, resids=True):
K1, K2, P, T0, w, e, dv1, dv2, lnf, noise = pars
rv1_pred = get_rv(T0=T0, P=P, e=e, K1=K1, w=w, t=t1)
rv2_pred = -get_rv(T0=T0, P=P, e=e, K1=K2, w=w, t=t2)
tplot = np.linspace(min(min(t1), min(t2)), max(max(t2), max(t2)), 100)
rv1_plot = get_rv(T0=T0, P=P, e=e, K1=K1, w=w, t=tplot)
rv2_plot = get_rv(T0=T0, P=P, e=e, K1=-K2, w=w, t=tplot)
inv_sigma2_1 = 1.0/v1_err**2
inv_sigma2_2 = 1.0/(np.exp(lnf)*v2_err**2)
s1 = np.nansum((rv1_pred - (v1-dv1))**2 * inv_sigma2_1 - np.log(inv_sigma2_1))
s2 = np.nansum((rv2_pred - (v2 - rv2_pred*K1/K2 - dv2))**2 * inv_sigma2_2 - np.log(inv_sigma2_2))
def tick_formatter(x, pos):
return "{:.0f}".format(x - 2450000)
MyFormatter = FuncFormatter(tick_formatter)
fig = plt.figure()
if resids:
gs = gridspec.GridSpec(5, 1)
top = plt.subplot(gs[:3])
resid1 = plt.subplot(gs[3], sharex=top)
resid2 = plt.subplot(gs[4], sharex=top)
fig.subplots_adjust(bottom=0.15, left=0.15, hspace=0.0)
top.errorbar(t1, v1-dv1, yerr=np.sqrt(v1_err**2 + np.exp(noise)), fmt='r^', label='Primary')
top.errorbar(t2, v2 - rv2_pred*K1/K2 - dv2, yerr=v2_err*np.exp(lnf/2.), fmt='ko', label='Secondary')
top.plot(tplot, rv1_plot, 'r-', alpha=0.5)
top.plot(tplot, rv2_plot, 'k-', alpha=0.5)
resid1.scatter(t1, v1-dv1 - rv1_pred)
resid1.plot(t1, np.zeros(len(t1)), 'r--')
resid1.set_ylabel('O-C (rv1)')
print('RMS scatter on primary = {:.3f} km/s'.format(np.std(v1-dv1-rv1_pred)))
resid2.scatter(t2, v2 - rv2_pred*K1/K2 - dv2 - rv2_pred)
resid2.plot(t2, np.zeros(len(t2)), 'r--')
resid2.set_ylabel('O-C (rv2)')
print('RMS scatter on secondary = {:.3f} km/s'.format(np.std(v2 - rv2_pred*K1/K2 - dv2 - rv2_pred)))
top.axes.get_xaxis().set_visible(False)
resid1.axes.get_xaxis().set_visible(False)
resid2.set_xlabel('JD - 2450000')
top.set_ylabel('RV (km/s)')
leg = top.legend(loc='best', fancybox=True)
top.xaxis.set_major_formatter(MyFormatter)
axes = [top, resid1, resid2]
else:
ax = fig.add_subplot(111)
ax.errorbar(t1, v1-dv1, yerr=v1_err, fmt='r^', label='Primary')
ax.errorbar(t2, v2 - rv2_pred*K1/K2 - dv2, yerr=v2_err*np.exp(lnf/2.), fmt='ko', label='Secondary')
ax.plot(tplot, rv1_plot, 'r-', alpha=0.5)
ax.plot(tplot, rv2_plot, 'k-', alpha=0.5)
ax.xaxis.set_major_formatter(MyFormatter)
ax.set_xlabel('JD - 2450000')
ax.set_ylabel('RV (km/s)')
leg = ax.legend(loc='best', fancybox=True)
axes = [ax]
# Calculate chi-squared
chi2 = (np.sum((v1-dv1 - rv1_pred)**2 / (v1_err**2 + np.exp(noise))) +
np.sum((v2 - rv2_pred*K1/K2 - dv2 - rv2_pred)**2 / (v2_err**2 * np.exp(lnf))))
N = len(v1) + len(v2)
print('X^2 = {:.2f}\nN = {}'.format(chi2, N))
return fig, axes
def read_primary_chains():
# Read in MCMC chains for the primary star parameters
home = os.environ['HOME']
fname = '{}/School/Research/McDonaldData/PlanetData/RV_fit/mcmc_samples/psidraa_els.txt'.format(home)
chain = pd.read_fwf(fname)
# Unit conversion
chain['k'] /= 1000.0
vals = chain[['tperi', 'period', 'ecc', 'k', 'lop']].get_values()
return vals
def read_data(first=20):
date, rv1, rv1_err, rv2, rv2_err = np.loadtxt('rv_data.npy')
return date, rv1, rv1_err, date[first:], rv2[first:], rv2_err[first:]
def do_partial_fit(N=100):
prim_pars = read_primary_chains()
t1, rv1, rv1_err, t2, rv2, rv2_err = read_data()
# Sample a subset of the chain parameters, since each one takes a little while
sample_list = []
for i, idx in enumerate(np.random.randint(0, prim_pars.shape[0], N)):
print('\n{}/{}: Fitting mass-ratio for primary orbit parameters: '.format(i+1, N))
print(prim_pars[idx])
samp = fit_partial(*prim_pars[idx], t=t2, rv2=rv2, rv2_err=rv2_err)
sample_list.append(samp)
return np.concatenate(sample_list)
if __name__ == '__main__':
t1, rv1, rv1_err, t2, rv2, rv2_err = read_data()
|
kgullikson88/Planet-Finder
|
MassRatio_Fitter.py
|
Python
|
mit
| 11,022
|
[
"Gaussian"
] |
ec672c5b873a8af049fb669683a78a1cd73efe1640868775966fb83eb76a3373
|
#-*- encoding: utf-8 -*-
"""
Monopoly odds
In the game, Monopoly, the standard board is set up in the following way:
GO A1 CC1 A2 T1 R1 B1 CH1 B2 B3 JAIL
H2 C1
T2 U1
H1 C2
CH3 C3
R4 R2
G3 D1
CC3 CC2
G2 D2
G1 D3
G2J F3 U2 F2 F1 R3 E3 E2 CH2 E1 FP
A player starts on the GO square and adds the scores on two 6-sided dice to determine the number of squares they advance in a clockwise direction. Without any further rules we would expect to visit each square with equal probability: 2.5%. However, landing on G2J (Go To Jail), CC (community chest), and CH (chance) changes this distribution.
In addition to G2J, and one card from each of CC and CH, that orders the player to go directly to jail, if a player rolls three consecutive doubles, they do not advance the result of their 3rd roll. Instead they proceed directly to jail.
At the beginning of the game, the CC and CH cards are shuffled. When a player lands on CC or CH they take a card from the top of the respective pile and, after following the instructions, it is returned to the bottom of the pile. There are sixteen cards in each pile, but for the purpose of this problem we are only concerned with cards that order a movement; any instruction not concerned with movement will be ignored and the player will remain on the CC/CH square.
Community Chest (2/16 cards):
1. Advance to GO
2. Go to JAIL
Chance (10/16 cards):
1. Advance to GO
2. Go to JAIL
3. Go to C1
4. Go to E3
5. Go to H2
6. Go to R1
7. Go to next R (railway company)
8. Go to next R
9. Go to next U (utility company)
10. Go back 3 squares.
The heart of this problem concerns the likelihood of visiting a particular square. That is, the probability of finishing at that square after a roll. For this reason it should be clear that, with the exception of G2J for which the probability of finishing on it is zero, the CH squares will have the lowest probabilities, as 5/8 request a movement to another square, and it is the final square that the player finishes at on each roll that we are interested in. We shall make no distinction between "Just Visiting" and being sent to JAIL, and we shall also ignore the rule about requiring a double to "get out of jail", assuming that they pay to get out on their next turn.
By starting at GO and numbering the squares sequentially from 00 to 39 we can concatenate these two-digit numbers to produce strings that correspond with sets of squares.
Statistically it can be shown that the three most popular squares, in order, are JAIL (6.24%) = Square 10, E3 (3.18%) = Square 24, and GO (3.09%) = Square 00. So these three most popular squares can be listed with the six-digit modal string: 102400.
If, instead of using two 6-sided dice, two 4-sided dice are used, find the six-digit modal string.
"""
from utils import *
#
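# A rough Monte Carlo sketch of one way to attack this (not the utils-based
# solution this file imports): simulate many rolls with two dice of the given
# number of sides, apply the G2J / Community Chest / Chance movement rules
# described above, and report the three most-visited squares as a six-digit
# modal string. Card draws are approximated by uniform sampling and the extra
# CC draw after "go back 3 squares" lands on CC3 is ignored, so the output is
# only an estimate of the exact (Markov-chain) answer.
import random

def monopoly_modal_string(dice_sides=4, rolls=1000000):
    GO, R1, JAIL, C1, E3, H2 = 0, 5, 10, 11, 24, 39
    G2J = 30
    CC = {2, 17, 33}
    CH = {7, 22, 36}
    railways = [5, 15, 25, 35]
    utilities = [12, 28]
    counts = [0] * 40
    pos, doubles = 0, 0
    for _ in range(rolls):
        d1 = random.randint(1, dice_sides)
        d2 = random.randint(1, dice_sides)
        doubles = doubles + 1 if d1 == d2 else 0
        if doubles == 3:             # three consecutive doubles -> jail
            pos, doubles = JAIL, 0
        else:
            pos = (pos + d1 + d2) % 40
            if pos == G2J:
                pos = JAIL
            elif pos in CC:          # 2/16 Community Chest cards move you
                card = random.randint(1, 16)
                if card == 1:
                    pos = GO
                elif card == 2:
                    pos = JAIL
            elif pos in CH:          # 10/16 Chance cards move you
                card = random.randint(1, 16)
                if card == 1:
                    pos = GO
                elif card == 2:
                    pos = JAIL
                elif card == 3:
                    pos = C1
                elif card == 4:
                    pos = E3
                elif card == 5:
                    pos = H2
                elif card == 6:
                    pos = R1
                elif card in (7, 8): # advance to the next railway company
                    pos = min(r for r in railways if r > pos) if pos < 35 else R1
                elif card == 9:      # advance to the next utility company
                    pos = min(u for u in utilities if u > pos) if pos < 28 else 12
                elif card == 10:     # go back 3 squares
                    pos = (pos - 3) % 40
        counts[pos] += 1
    top3 = sorted(range(40), key=lambda sq: counts[sq], reverse=True)[:3]
    return ''.join('%02d' % sq for sq in top3)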
|
zlsun/ProjectEuler
|
084.py
|
Python
|
mit
| 3,189
|
[
"VisIt"
] |
9a0754f8779d65ec32329241a4deb0751c93758ddbb26cbf02c6b6cc084c0a33
|
#!/usr/bin/env python3
"""
Reads the uniref100 flat file and creates a SQLite3 database of commonly-accessed attributes
for each entry.
INPUT
When you BLAST against a database like UniRef100 or UniRef90 you'll get accessions like:
UniRef100_A8PWR2 RepID=A8PWR2_MALGO
UniRef100_M9MHS5 RepID=M9MHS5_PSEA3
The first is the accession and later in the description contains the link to the RepID.
A8PWR2_MALGO
Entries for these are found in the uniprot_trembl.dat.gz file.
We're primarily looking for the following attributes:
- id
- accession
- gene product names
- gene symbol
- GO terms
- EC numbers
These look like this in the file:
ID A8PWR1_MALGO Unreviewed; 963 AA.
AC Q9NRN7; B2R6D1; Q9C068; Q9P0Q3; Q9UG80; Q9Y389;
DE SubName: Full=Putative uncharacterized protein;
DR GO; GO:0000062; F:fatty-acyl-CoA binding; IEA:InterPro.
DR GO; GO:0008289; F:lipid binding; IEA:UniProtKB-KW.
DR GO; GO:0005737; C:cytoplasm; IEA:UniProtKB-SubCell.
DR GO; GO:0005634; C:nucleus; IEA:Compara.
OS Homo sapiens (Human).
GN Name=amoA;
DE EC=6.3.4.3;
DE EC=1.5.1.5;
DE EC=3.5.4.9;
DE EC=6.3.4.3;
OUTPUT
The following tables are created in the SQLite3 db (which is created if it doesn't
already exist) (these are fake example data, and there are a lot of 1:many
relationships here):
table: uniprot_sprot
table: uniref
----------
id = 001R_FRG3G
full_name = 11S globulin subunit beta
organism = Frog virus 3 (isolate Goorha)
symbol = FV3-001R
uniprot_sprot_acc
uniref_acc
-----------------
id = 001R_FRG3G
accession = Q6GZX4
uniprot_sprot_go
uniref_go
----------------
id = 001R_FRG3G
go_id = string (0005634)
uniprot_sprot_ec
uniref_ec
----------------
id = 001R_FRG3G
ec_num = 6.3.4.3
"""
import argparse
import gzip
import os
import re
import sqlite3
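# Hypothetical usage sketch (not part of the loader itself): once the database
# has been built by this script, lookups can be done with plain sqlite3 queries
# against the tables created below. The accession in the trailing comment is
# only an illustrative value.
def lookup_accession(db_path, accession):
    """Return (id, full_name, organism, symbol) for a UniRef accession, or None."""
    conn = sqlite3.connect(db_path)
    curs = conn.cursor()
    curs.execute("SELECT u.id, u.full_name, u.organism, u.symbol "
                 "FROM uniref u JOIN uniref_acc a ON a.id = u.id "
                 "WHERE a.accession = ?", (accession,))
    row = curs.fetchone()
    curs.close()
    conn.close()
    return row
# e.g. lookup_accession('uniref100.db', 'A8PWR2')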
pfam2go = dict()
def main():
parser = argparse.ArgumentParser( description='Reads a uniprot_trembl.dat file and creates a SQLite3 database of commonly-accessed attributes for each accession.')
## output file to be written
parser.add_argument('-i', '--input', type=str, required=True, help='Path to the uniprot_trembl.dat file. See INPUT section for details' )
parser.add_argument('-o', '--output_db', type=str, required=True, help='Path to an output SQLite3 db to be created' )
args = parser.parse_args()
# this creates it if it doesn't already exist
conn = sqlite3.connect(args.output_db)
curs = conn.cursor()
print("INFO: Creating tables ...")
create_tables( curs )
conn.commit()
id = None
accs = list()
full_name = None
organism = None
symbol = None
go_ids = list()
ec_nums = list()
record_count = 0
is_compressed = False
if args.input.endswith('.gz'):
ifh = gzip.open(args.input, 'rb')
is_compressed = True
else:
ifh = open(args.input)
print("INFO: Parsing DAT file ...")
for line in ifh:
if is_compressed:
line = line.decode()
line = line.rstrip()
# is this the end of an entry?
if re.match( "^//", line ):
# save
curs.execute("INSERT INTO uniref (id, full_name, organism, symbol) values (?,?,?,?)", (id, full_name, organism, symbol))
for acc in accs:
curs.execute("INSERT INTO uniref_acc (id, accession) values (?,?)", (id, acc))
for go_id in go_ids:
curs.execute("INSERT INTO uniref_go (id, go_id) values (?,?)", (id, go_id))
for ec_num in ec_nums:
curs.execute("INSERT INTO uniref_ec (id, ec_num) values (?,?)", (id, ec_num))
# reset
id = None
accs = list()
full_name = None
organism = None
symbol = None
go_ids = list()
ec_nums = list()
record_count += 1
if record_count % 1000 == 0:
print("{0} records processed ...".format(record_count))
conn.commit()
# for debugging only
#break
elif line.startswith("ID"):
id = line.split()[1]
elif line.startswith("AC"):
ac_parts = line.split()
for part in ac_parts:
if part == 'AC':
continue
else:
part = part.rstrip(';')
accs.append(part)
elif line.startswith("DE"):
m = re.match("DE\s+SubName: Full=(.+)", line.rstrip(';'))
if m and full_name is None:
full_name = m.group(1)
else:
m = re.match("DE\s+RecName: Full=(.+)", line.rstrip(';'))
if m and full_name is None:
full_name = m.group(1)
else:
m = re.search("EC=(\S+)", line.rstrip(';'))
if m:
ec_nums.append(m.group(1))
elif line.startswith("DR"):
m = re.search("GO:(\d+)", line)
if m:
go_ids.append(m.group(1))
elif line.startswith("OS"):
m = re.match("OS\s+(.+)\.$", line)
if m:
organism = m.group(1)
elif line.startswith("GN"):
m = re.search("^GN\s+Name=(.+?)\;", line)
if m:
symbol = m.group(1)
conn.commit()
print("INFO: Creating indexes ...")
create_indexes(curs)
conn.commit()
curs.close()
print("INFO: Complete.")
def create_indexes( cursor ):
# CREATE INDEX index_name ON table_name (column_name);
cursor.execute("CREATE INDEX idx_col_us_id ON uniref (id)")
cursor.execute("CREATE INDEX idx_col_usa_id ON uniref_acc (id)")
cursor.execute("CREATE INDEX idx_col_usa_acc ON uniref_acc (accession)")
cursor.execute("CREATE INDEX idx_col_usg_id ON uniref_go (id)")
cursor.execute("CREATE INDEX idx_col_usg_go ON uniref_go (go_id)")
cursor.execute("CREATE INDEX idx_col_use_id ON uniref_ec (id)")
cursor.execute("CREATE INDEX idx_col_use_ec ON uniref_ec (ec_num)")
def create_tables( cursor ):
cursor.execute("""
CREATE TABLE uniref (
id text primary key,
full_name text,
organism text,
symbol text
)
""")
cursor.execute("""
CREATE TABLE uniref_acc (
id text not NULL,
accession text not NULL
)
""")
cursor.execute("""
CREATE TABLE uniref_go (
id text not NULL,
go_id text not NULL
)
""")
cursor.execute("""
CREATE TABLE uniref_ec (
id text not NULL,
ec_num text not NULL
)
""")
if __name__ == '__main__':
main()
|
zctea/biocode
|
blast/uniref_to_sqlite3.py
|
Python
|
gpl-3.0
| 7,123
|
[
"BLAST"
] |
40fd6ff03abb3a807af1164cd8c15b885221c13416d49e5a05378430af5ec610
|
"""
autotune.py - module generate test data for autotune
Usage:
import autotune
autotune.baseline_test()
autotune.rms_spectrum_test(song='tainted', tuning_f0=110.) # RMS power of equal-temperament bins for song 'tainted'
autotune.mixture_test(song='tainted', tuning_f0=110.) # apply autotune algorithm to song/vocals.wav and mix with song/background.wav
autotune.predominant_melody_test(song='tained', tuning_f0=110.)
Author: Michael A. Casey
Copyright (C) 2015, Bregman Media Labs, Dartmouth College
License: Apache 2.0, see LICENSE file
"""
from bregman.suite import * # the Bregman audio processing toolkit
import matplotlib
from matplotlib.pyplot import *
from matplotlib.mlab import rms_flat
from matplotlib.cbook import flatten
from numpy.linalg import svd
from numpy import *
import os, glob
import scikits.audiolab as audio
import pickle
try:
import voweltimbre as sung
except:
print "warning: voweltimbre not installed"
try:
from essentia.standard import *
except:
print "warning: essentia not installed"
equal_temperament = array(TuningSystem().equal_temperament()) # Ratios for equal temperament tuning
just_intonation = array(TuningSystem().just_intonation()) # Ratios for equal temperament tuning
pythagorean = array(TuningSystem().Pythagorean()) # Ratios for equal temperament tuning
major_scale = [0,2,4,5,7,9,11,12]
f0 = 440 # tuning reference frequency
def gen_audio(f0=440, tuning=equal_temperament, scale=major_scale, filename=None):
# Audio envelope for a note
env = r_[linspace(0,1,440),linspace(1,.8,220), .8*ones(22050-3*660), linspace(.8,0,660), zeros(660)]
# Construct the scale at the given tuning
x = hstack([env * harmonics(f0=f, num_harmonics=10, num_points=22050) for f in f0*tuning[scale]])
if filename is None:
play(balance_signal(x))
else:
wavwrite(x,filename,44100)
def gen_test_signals():
gen_audio(f0, equal_temperament, major_scale, 'A440_Equal_Major.wav')
gen_audio(f0, just_intonation, major_scale, 'A440_Just_Major.wav')
gen_audio(f0, pythagorean, major_scale, 'A440_Pythagorean_Major.wav')
# Detune entire scale
f1 = f0*2**(0.4/12.) # A440 * {0.4 semitone}
gen_audio(f1, equal_temperament, major_scale, 'A450_Equal_Major.wav')
gen_audio(f1, just_intonation, major_scale, 'A450_Just_Major.wav')
gen_audio(f1, pythagorean, major_scale, 'A450_Pythagorean_Major.wav')
def load_signals(dir_expr="*.wav"):
flist = sorted(glob.glob(dir_expr))
if len(flist) ==0 :
raise ValueError("No files found.")
X = {}
ext = flist[0].split('.')[-1][:3]
if ext=='wav':
afun = wavread
elif ext=='aif':
afun = aiffread
else:
raise ValueError("Unrecognized audio file extension: %s"%ext)
for f in flist:
x, sr, fmt = afun(f)
f = f.split(os.sep)[-1] if len(f.split(os.sep)) else f
f = f.split('.')[0].replace(' ','_')
X[f] = x
return X
def peaks_to_autotuned_spectrum(audio, peaks, factor=1.0, N=8192, H=2048, SR=44100.):
"""
Peaks to autotuned short-time Fourier transform
inputs:
audio - float array
peaks - peak dict of 'freqs' and 'mags' per time-point
factor - amount to autotune [1.0=100%]
N - fft length
H - fft hop
SR - audio sample rate
outputs:
bregman.LogFrequencySpectrum
"""
freqs, mags = peaks['freqs'], peaks['mags']
F = features.LinearFrequencySpectrum(audio, nfft=N, wfft=N, nhop=H)
eq_freqs = 55*2**(arange(0,8.5,1/12.))
eq_bins = [argmin(abs(F._fftfrqs-f)) for f in eq_freqs]
Xhat = zeros(F.X.shape)
T = Xhat.shape[1]
for t in xrange(len(freqs)):
if t<T:
for i,(f,a) in enumerate(zip(freqs[t],mags[t])):
if i==0:
eq_freq = eq_freqs[argmin(abs(eq_freqs-f))]
eq_ratio = eq_freq / f # fundamental frequency
df = f * (eq_ratio - 1.0)
f_idx = argmin(abs(F._fftfrqs - (f + factor * df))) # harmonics
Xhat[f_idx,t]=a
F.X = Xhat
return F
def auto_tune(fname, factor=1.0):
X = load_signals(fname)
x = array(X[X.keys()[0]][:,0],dtype='f')
peaks = sung.predominant_harmonics(x, fname)
stft = peaks_to_autotuned_spectrum(x, peaks, factor)
xhat = stft.inverse(stft.X)
return xhat
def normalize(A, axis=None):
Ashape = A.shape
try:
norm = A.sum(axis) + EPS
except TypeError:
norm = A.copy()
for ax in reversed(sorted(axis)):
norm = norm.sum(ax)
norm += EPS
if axis:
        nshape = array(Ashape)  # 'array' comes from the numpy star import; 'np' is not imported in this module
nshape[axis] = 1
norm.shape = nshape
return A / norm
def baseline_test():
# load reference, detuned, and auto-tuned signals
xdata = load_signals("A*_Equal_Major.wav")
ydata = load_signals("A450_Equal_Major_100.wav")
x0 = xdata["A440_Equal_Major"] # reference
x1 = xdata['A450_Equal_Major'] # detuned (f0)
y1 = ydata['A450_Equal_Major_100'] # autotuned (processed signal)
# Spectral bin resolution = 2.0Hz, time resolution = 2.0Hz
n = 44100 / 2
X0 = LinearFrequencySpectrum(x0, nfft=n, wfft=n, nhop=n)
X1 = LinearFrequencySpectrum(x1, nfft=n, wfft=n, nhop=n)
Y1 = LinearFrequencySpectrum(y1, nfft=n, wfft=n, nhop=n)
freqs = X0._fftfrqs
figure()
semilogx(freqs,X1.X[:,0])
semilogx(freqs,Y1.X[:,0])
semilogx(freqs,X0.X[:,0],'--')
title('Melodyne shifts harmonics of A450Hz to A440Hz', fontsize=20)
xlabel('Frequency (Hz)',fontsize=20)
ylabel('Power',fontsize=20)
legend(['450Hz Original','Autotune','440Hz Reference'],loc=0)
eq_freqs = 110*2**(arange(0,6,1/12.))
eq_bins = [argmin(abs(X0._fftfrqs-f)) for f in eq_freqs]
ax = axis()
plot(c_[X0._fftfrqs[eq_bins],X0._fftfrqs[eq_bins]].T,c_[[ax[2]]*len(eq_freqs),[ax[3]]*len(eq_freqs)].T,'k--')
def mixture_test(song='tainted'):
"""
Display spectral profiles of original and autotuned mixture spectra
inputs:
song - directory name of song (contains: song vocals.wav and background.wav)
outputs:
mix_000, mix_100 - mixed vocals and background for nontuned and autotuned vocals
"""
X = load_signals(song+os.sep+'*.wav')
x0 = X['vocals']
x1 = X['background']
xhat0 = auto_tune(song+os.sep+'vocals.wav',0.0)[:len(x0)] # no autotune
xhat1 = auto_tune(song+os.sep+'vocals.wav',1.0)[:len(x0)] # autotuned to 440Hz
mix0 = (balance_signal(c_[xhat0,xhat0])+balance_signal(x1))/2.0 # background+vocals no autotune
mix1 = (balance_signal(c_[xhat1,xhat1])+balance_signal(x1))/2.0 # background+vocals with autotune
# Short-time Fourier analysis
F0 = LinearFrequencySpectrum(mix0,nfft=8192,wfft=8192,nhop=2048)
F1 = LinearFrequencySpectrum(mix1,nfft=8192,wfft=8192,nhop=2048)
eq_freqs = 110*2**(arange(0,5,1/12.))
eq_bins = [argmin(abs(F1._fftfrqs-f)) for f in eq_freqs]
# Plot spectra and ideal autotuned pitch bins
figure()
semilogx(F0._fftfrqs, normalize(F0.X).mean(1))
semilogx(F1._fftfrqs, normalize(F1.X).mean(1))
ax = axis()
plot(c_[F0._fftfrqs[eq_bins],F0._fftfrqs[eq_bins]].T,c_[[ax[2]]*len(eq_freqs),[ax[3]]*len(eq_freqs)].T,'k--')
legend(['Original vocals','Autotuned vocals','ET pitch'],loc=0)
title(song+': untuned/tuned vocals mixed with background', fontsize=20)
xlabel('Frequency (Hz)',fontsize=20)
ylabel('Power',fontsize=20)
# Calculate RMS amplitude in equal-temperament pitch bands
text(1,ax[3]*.9, "ET bands nontuned RMS = %f"%(F0.X[eq_bins]**2).mean()**0.5, fontsize=14)
text(1,ax[3]*.8, "ET bands autotuned RMS = %f"%(F1.X[eq_bins]**2).mean()**0.5, fontsize=14)
return mix0, mix1
def rms_spectrum_test(song='tainted', tuning_f0=110., channel=0):
"""
Extract spectral RMS power for equal temperament pitches
inputs:
song - directory name of song (contains: song/mix_000.wav and song/mix_100.wav non-autotuned and autotuned mixes)
tuning_f0 - lowest frequency to track melody (110Hz = A440Hz/4) [110]
channel - whether to use 0=left, 1=right, or 2=both channels [0]
outputs:
dict {'nontuned_rms':df0, 'autotuned_rms':df1} energy (RMS power) at ideal pitch tuning freqs
"""
x0, sr, fmt = wavread(song+os.sep+'mix_000.wav')
x1, sr, fmt = wavread(song+os.sep+'mix_100.wav')
if channel==2: # mix the channels
if len(x0.shape) > 1:
x0 = x0.mean(1)
if len(x1.shape) > 1:
x1 = x1.mean(1)
else: # extract given channel
if len(x0.shape) > 1:
x0 = x0[:,channel]
if len(x1.shape) > 1:
x1 = x1[:,channel]
# Short-time Fourier analysis
F0 = LinearFrequencySpectrum(x0,nfft=8192,wfft=8192,nhop=2048)
F1 = LinearFrequencySpectrum(x1,nfft=8192,wfft=8192,nhop=2048)
eq_freqs = tuning_f0*2**(arange(0,5,1/12.))
eq_bins = array([argmin(abs(F0._fftfrqs-f)) for f in eq_freqs])
# df0 = normalize(F0.X)[eq_bins].mean(1)
df0 = (normalize(F0.X)[eq_bins]**2).mean(1)**0.5
    #df1 = normalize(F1.X)[eq_bins].mean(1)
df1 = (normalize(F1.X)[eq_bins]**2).mean(1)**0.5
figure()
semilogx(F0._fftfrqs[eq_bins], df0)
semilogx(F0._fftfrqs[eq_bins], df1)
legend(['Original vocals','Autotuned vocals'],loc=0)
title(song+': ET bands untuned/tuned vocals mixed with background', fontsize=20)
xlabel('Equal Temperament Bands (Hz)',fontsize=20)
ylabel('Power',fontsize=20)
grid()
return {'nontuned_rms':rms_flat(df0), 'autotuned_rms':rms_flat(df1)}
def predominant_melody_test(song='tainted', tuning_f0=110., channel=0):
"""
Extract predominant melody (f0 track) and compare to equal temperament tuning.
inputs:
song - directory name of song (contains: song/mix_000.wav and song/mix_100.wav non-autotuned and autotuned mixes)
tuning_f0 - lowest frequency to track melody (110Hz = A440Hz/4) [110]
channel - whether to use 0=left, 1=right, or 2=both channels [0]
outputs:
dict {'nontuned_deltas':df0, 'autotuned_deltas':df1} deviations from ideal pitch tuning
"""
p0 = PredominantMelody(frameSize=4096, hopSize=2048,
minFrequency=80.0, maxFrequency=20000., guessUnvoiced=True, voiceVibrato=False)
p1 = PredominantMelody(frameSize=4096, hopSize=2048,
minFrequency=80.0, maxFrequency=20000., guessUnvoiced=True, voiceVibrato=False)
x0, sr, fmt = wavread(song+os.sep+'mix_000.wav')
x1, sr, fmt = wavread(song+os.sep+'mix_100.wav')
if channel==2: # mix the channels
if len(x0.shape) > 1:
x0 = x0.mean(1)
if len(x1.shape) > 1:
x1 = x1.mean(1)
else: # extract given channel
if len(x0.shape) > 1:
x0 = x0[:,channel]
if len(x1.shape) > 1:
x1 = x1[:,channel]
mel00 = p0(array(x0,dtype='f'))[0]
mel10 = p1(array(x1,dtype='f'))[0]
eq_freqs = tuning_f0*2**(arange(0,5,1/12.))
df0 = median([min(abs(eq_freqs-f)) for f in mel00[where(mel00)]])
df1 = median([min(abs(eq_freqs-f)) for f in mel10[where(mel10)]])
return {'nontuned_deltas':df0, 'autotuned_deltas':df1}
def eval_gauss(x, mu,sigma2):
"""
evaluate point x on 1d gaussian with mean mu and variance sigma2
"""
    return 1.0/sqrt(2*pi*sigma2)*exp(-0.5*(x-mu)**2/sigma2)  # squared deviation over the variance
def dB(x):
return 20*log10(x)
def calc_precrec(t0w0,t0w1,t1w0,t1w1,null_clf):
"""
Calculate precision-recall from log likelihoods
inputs:
t0w0 - log likelihood of null data with null model
t0w1 - log likelihood of null data with autotune model
t1w0 - log likelihood of autotune data with null model
t1w1 - log likelihood of autotune data with autotune model
outputs:
prec - precision for each retrieved autotune datum
rec - recall for each retrieved autotune datum
"""
if null_clf:
t = argsort(r_[t0w0-t0w1,t1w0-t1w1])[::-1] # TP + FP
else:
t = argsort(r_[t1w1-t1w0,t0w1-t0w0])[::-1] # TP + FP
N = len(t) # count percentiles for 100% precision
prec, rec = [], []
for i in xrange(N):
if sum(t[:i+1]<N/2):
prec.append(sum(t[:i+1]<N/2)/float(i+1))
rec.append(sum(t[:i+1]<N/2)/float(N/2))
if rec[-1]>=1.0-finfo(float).eps:
break
return prec,rec
def calc_fscore(r,p):
"""
given recall and precision arrays, calculate the f-measure (f-score)
"""
a = array(zip(flatten(r),flatten(p)))
r,p = a[:,0],a[:,1]
idx = where(r)
r,p = r[idx],p[idx]
F = (2*p*r/(p+r)).mean()
return F
def evaluate_classifier(fname='saved_data.pickle', use_pca=True, null_clf=False, eps=finfo(float).eps, clip=-100):
"""
    Gaussian classifier for non-tuned / autotuned equal-temperament magnitudes
"""
with open(fname,'rb') as f:
data = pickle.load(f)
a0 = array([[dd['nontuned_mags'] for dd in d] for d in data[1::2]])
a1 = array([[dd['autotuned_mags'] for dd in d] for d in data[1::2]])
P,TP,FN,FP,TN,PR,RE = [],[],[],[],[],[],[]
T0W0,T0W1,T1W0,T1W1 = [],[],[],[]
for song in arange(len(a0)):
# per-song precision / recall
idx = setdiff1d(arange(len(a0)),[song])
train0=dB(array([a for a in flatten(a0[idx])]))
train1=dB(array([a for a in flatten(a1[idx])]))
test0=dB(array([a for a in flatten(a0[song])]))
test1=dB(array([a for a in flatten(a1[song])]))
if use_pca:
u,s,v = svd(array([train0,train1]).T,0)
train0 = u[:,0]
train1 = u[:,1]
test = array([test0,test1]).T
test = dot(dot(test,v.T),diag(1./s))
test0 = test[:,0]
test1 = test[:,1]
m0,v0 = train0.mean(),train0.var()
m1,v1 = train1.mean(),train1.var()
P.append(len(test0))
t1w0,t1w1 = log(eval_gauss(test1,m0,v0)+eps), log(eval_gauss(test1,m1,v1)+eps)
t0w0,t0w1 = log(eval_gauss(test0,m0,v0)+eps), log(eval_gauss(test0,m1,v1)+eps)
if clip!=0:
t1w0[t1w0<clip]=clip
t1w1[t1w1<clip]=clip
t0w0[t0w0<clip]=clip
t0w1[t0w1<clip]=clip
T0W0.append(t0w0)
T0W1.append(t0w1)
T1W0.append(t1w0)
T1W1.append(t1w1)
TP.append(sum(t1w1>t1w0))
FN.append(sum(t1w1<=t1w0))
FP.append(sum(t0w1>t0w0))
TN.append(sum(t0w1<=t0w0))
prec,rec = calc_precrec(t0w0,t0w1,t1w0,t1w1,null_clf)
PR.append(prec)
RE.append(rec)
F = calc_fscore(RE,PR)
return {'P':array(P),'TP':array(TP),'FN':array(FN),'FP':array(FP),'TN':array(TN),
'PR':PR,'RE':RE,'F':F, 'T0W0':T0W0,'T0W1':T0W1,'T1W0':T1W0,'T1W1':T1W1}
def plot_evaluation(stats, N=10.):
figure()
PR = array(zip(flatten(stats['RE']),flatten(stats['PR'])))
PR[:,0] = fix(PR[:,0]*N)/float(N) # divide recall into deciles
precrec = []
for re in unique(PR[:,0]):
p = PR[:,1][where(PR[:,0]==re)]
precrec.append((re,p.mean(),p.std()/sqrt(len(p))))
errorbar(x=re,y=p.mean(),yerr=p.std()/sqrt(len(p)),color='b')
plot(re,p.mean(),'bx')
precrec=array(precrec)
plot(precrec[:,0],precrec[:,1],'b--')
axis([-0.05,1.05,0,1.05])
grid()
title('ROC autotuned/non-tuned classifier',fontsize=20)
xlabel('Recall (standardized deciles)', fontsize=16)
ylabel('Precision', fontsize=16)
text(.85,.95,'F1=%.2f'%stats['F'],fontsize=16)
return precrec
|
bregmanstudio/voxid
|
autotune.py
|
Python
|
apache-2.0
| 14,190
|
[
"Gaussian"
] |
6b18ada881948697494b641d98fb43bfd4612b8f07b8027b294e20656d44c7bc
|
# -*- coding: utf-8 -*-
import numpy as np
def vectoa(Xg,Yg,X,Y,U,V,corrlenx,corrleny,err,b=0):
'''
(Adapted from Filipe Fernandes function)
Vectoa is a vectorial objective analysis function.
It interpolates a velocity field (U and V, east and north velocity components)
into a streamfunction field (dimension MxN) solving the Laplace equation:
    $\nabla^{2}\Psi=0$.
======================================================================
    Input:
    Xg & Yg - Grid of interpolation points (i.e. LON & LAT grid)
    X & Y - Arrays of observation points
    U & V - Arrays of observed east and north components of velocity
    corrlenx, corrleny & err - Correlation length scales (in x and y) and error for a
    Gaussian streamfunction covariance function (floats)
b - Constant value that forces a correction in the data mean value.
Unless it is defined, b=0 by default.
======================================================================
    Output:
    PSI - Streamfunction field matrix with MxN dimension.
    The dimension of the output is always the same as Xg & Yg
======================================================================
PYTHON VERSION by:
Iury Sousa and Hélio Almeida - 30 May 2016
Laboratório de Dinâmica Oceânica - IOUSP
======================================================================'''
# making sure that the input variables aren't changed
xc,yc,x,y,u,v=Xg.copy(),Yg.copy(),X.copy(),Y.copy(),U.copy(),V.copy()
corrlen = corrleny
xc = xc*( corrleny*1./corrlenx)
x = x*(corrleny*1./corrlenx)
n = len(x)
# Joins all the values of velocity (u then v) in one column-wise array.
# Being u and v dimension len(u)=y, then uv.shape = (2*y,1)
uv=np.array([np.hstack((u,v))]).T #data entered row-wise
# CALCULATING angles and distance
# pp is a join of two matrix calculating the distance between every point of observation and all others like the
# example below
# len(y) = M -> pp[0].shape = MxM being:
# pp[0][i] = y-y[i]
pp = -np.tile(y,(n,1)).T+np.tile(y,(n,1)),-np.tile(x,(n,1)).T+np.tile(x,(n,1))
#
t = []
for ii,jj in zip(pp[0].ravel(),pp[1].ravel()):
t.append(np.math.atan2(ii,jj))
t = np.array(t)
t.shape = pp[0].shape
# t end up to be angles and d2 the distances between every observation point and all the others
d2=((np.tile(x,(n,1)).T-np.tile(x,(n,1)))**2+(np.tile(y,(n,1)).T-np.tile(y,(n,1)))**2)
lambd = 1/(corrlen**2)
bmo=b*err/lambd
R=np.exp(-lambd*d2) #%longitudinal
S=R*(1-2*lambd*d2)+bmo #%transverse
R=R+bmo
A=np.zeros((2*n,2*n))
A[0:n,0:n]=(np.cos(t)**2)*(R-S)+S
A[0:n,n:2*n]=np.cos(t)*np.sin(t)*(R-S)
A[n:2*n,0:n]=A[0:n,n:2*n]
A[n:2*n,n:2*n]=(np.sin(t)**2)*(R-S)+S
A=A+err*np.eye(A.shape[0])
# angles and distances
nv1,nv2 =xc.shape
nv=nv1*nv2
xc = xc.T.ravel()
yc = yc.T.ravel()
#% the same idea of pp but this time for interpolation points
ppc = -np.tile(yc,(n,1)).T+np.tile(y,(nv,1)),-np.tile(xc,(n,1)).T+np.tile(x,(nv,1))
tc = []
for ii,jj in zip(ppc[0].ravel(),ppc[1].ravel()):
tc.append(np.math.atan2(ii,jj))
tc = np.array(tc)
tc.shape = ppc[0].shape
d2=((np.tile(xc,(n,1)).T-np.tile(x,(nv,1)))**2+(np.tile(yc,(n,1)).T-np.tile(y,(nv,1)))**2)
R=np.exp(-lambd*d2)+bmo;
P=np.zeros((nv,2*n))
# streamfunction-velocity covariance
P[:,0:n]=np.sin(tc)*np.sqrt(d2)*R;
P[:,n:2*n]=-np.cos(tc)*np.sqrt(d2)*R;
    PSI=np.dot(P,np.linalg.solve(A,uv)) # solve the linear system
PSI=PSI.reshape(nv2,nv1).T
return PSI
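# Minimal usage sketch for vectoa with made-up synthetic observations, assuming
# the conventions documented above: Xg/Yg come from np.meshgrid and the
# observations are 1-D arrays. The correlation lengths and error are arbitrary
# illustrative values, not recommendations.
def _example_vectoa():
    xg, yg = np.meshgrid(np.linspace(0., 1., 20), np.linspace(0., 1., 25))
    xobs = np.random.rand(30)
    yobs = np.random.rand(30)
    uobs = -np.sin(np.pi * yobs)  # toy eastward component
    vobs = np.sin(np.pi * xobs)   # toy northward component
    psi = vectoa(xg, yg, xobs, yobs, uobs, vobs,
                 corrlenx=0.3, corrleny=0.3, err=0.1)
    return psi  # streamfunction on the same grid as xg/yg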
def scaloa(xc, yc, x, y, t=[], corrlenx=None,corrleny=None, err=None, zc=None):
"""
(Adapted from Filipe Fernandes function)
Scalar objective analysis. Interpolates t(x, y) into tp(xc, yc)
Assumes spatial correlation function to be isotropic and Gaussian in the
form of: C = (1 - err) * np.exp(-d**2 / corrlen**2) where:
d : Radial distance from the observations.
Parameters
----------
corrlen : float
Correlation length.
err : float
Random error variance (epsilon in the papers).
Return
------
tp : array
Gridded observations.
ep : array
Normalized mean error.
Examples
--------
See https://ocefpaf.github.io/python4oceanographers/blog/2014/10/27/OI/
Notes
-----
    The function `scaloa` assumes that the user knows `err` and `corrlen` or
    that these parameters were chosen arbitrarily. The usual guesses are the
    first baroclinic Rossby radius for `corrlen` and 0.1 to 0.2 for the
    sampling error.
"""
corrlen = corrleny
xc = xc*( corrleny*1./corrlenx)
x = x*(corrleny*1./corrlenx)
n = len(x)
x, y = np.reshape(x, (1, n)), np.reshape(y, (1, n))
# Squared distance matrix between the observations.
d2 = ((np.tile(x, (n, 1)).T - np.tile(x, (n, 1))) ** 2 +
(np.tile(y, (n, 1)).T - np.tile(y, (n, 1))) ** 2)
nv = len(xc)
xc, yc = np.reshape(xc, (1, nv)), np.reshape(yc, (1, nv))
# Squared distance between the observations and the grid points.
dc2 = ((np.tile(xc, (n, 1)).T - np.tile(x, (nv, 1))) ** 2 +
(np.tile(yc, (n, 1)).T - np.tile(y, (nv, 1))) ** 2)
# Correlation matrix between stations (A) and cross correlation (stations
# and grid points (C))
A = (1 - err) * np.exp(-d2 / corrlen ** 2)
C = (1 - err) * np.exp(-dc2 / corrlen ** 2)
if 0: # NOTE: If the parameter zc is used (`scaloa2.m`)
A = (1 - d2 / zc ** 2) * np.exp(-d2 / corrlen ** 2)
C = (1 - dc2 / zc ** 2) * np.exp(-dc2 / corrlen ** 2)
# Add the diagonal matrix associated with the sampling error. We use the
# diagonal because the error is assumed to be random. This means it just
# correlates with itself at the same place.
A = A + err * np.eye(len(A))
# Gauss-Markov to get the weights that minimize the variance (OI).
tp = None
ep = 1 - np.sum(C.T * np.linalg.solve(A, C.T), axis=0) / (1 - err)
if any(t)==True: ##### was t!=None:
t = np.reshape(t, (n, 1))
tp = np.dot(C, np.linalg.solve(A, t))
#if 0: # NOTE: `scaloa2.m`
# mD = (np.sum(np.linalg.solve(A, t)) /
# np.sum(np.sum(np.linalg.inv(A))))
# t = t - mD
# tp = (C * (np.linalg.solve(A, t)))
# tp = tp + mD * np.ones(tp.shape)
return tp, ep
if any(t)==False: ##### was t==None:
print("Computing just the interpolation errors.")
#Normalized mean error. Taking the squared root you can get the
#interpolation error in percentage.
return ep
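# Minimal usage sketch for scaloa with made-up observations, assuming the
# conventions documented above: observation and grid positions as 1-D arrays.
# The correlation lengths and error variance are arbitrary illustrative values.
def _example_scaloa():
    xobs = np.random.rand(40)
    yobs = np.random.rand(40)
    tobs = np.sin(2 * np.pi * xobs) + 0.1 * np.random.randn(40)
    xg = np.linspace(0., 1., 50)
    yg = 0.5 * np.ones(50)
    tp, ep = scaloa(xg, yg, xobs, yobs, t=tobs,
                    corrlenx=0.2, corrleny=0.2, err=0.1)
    return tp, ep  # gridded estimate and normalized mean error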
|
iuryt/ocean
|
OceanLab/oa.py
|
Python
|
mit
| 6,744
|
[
"Gaussian"
] |
cba782a8a06755b46d3579ab0dc813eb6fdacbe063d32094a2b302ff2ea60ba1
|
""" AlwaysProbingPolicy module """
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
class AlwaysProbingPolicy(PolicyBase):
"""
The AlwaysProbingPolicy is a dummy module that can be used as example, it
always returns Probing status.
"""
@staticmethod
def _evaluate(commandResult):
"""
It returns Probing status, evaluates the default command, but its output
is completely ignored.
"""
policyResult = {"Status": "Probing", "Reason": "AlwaysProbing"}
return S_OK(policyResult)
|
DIRACGrid/DIRAC
|
src/DIRAC/ResourceStatusSystem/Policy/AlwaysProbingPolicy.py
|
Python
|
gpl-3.0
| 599
|
[
"DIRAC"
] |
2964c0761ad7cc65d70934ca8f8d7c388a41e60441b53be510ce65457738f7fc
|
import gen_utils
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import vtktud
class MyGlyph3D(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# initialise our base class
ModuleBase.__init__(self, module_manager)
self._glyph3d = vtktud.vtkMyGlyph3D()
module_utils.setup_vtk_object_progress(self, self._glyph3d,
'Making 3D glyphs')
self._config.scaling = 1.0
self._config.scalemode = 1.0
configList = [
('Scaling:', 'scaling', 'base:float', 'text',
'Glyphs will be scaled by this factor.'),
('Scalemode:', 'scalemode', 'base:int', 'text',
'Scaling will occur by scalar, vector direction or magnitude.')]
ScriptedConfigModuleMixin.__init__(self, configList)
self._viewFrame = self._createWindow(
{'Module (self)' : self,
'vtkMyGlyph3D' : self._glyph3d})
# pass the data down to the underlying logic
self.config_to_logic()
# and all the way up from logic -> config -> view to make sure
self.logic_to_config()
self.config_to_view()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
# get rid of our reference
del self._glyph3d
def get_input_descriptions(self):
return ('vtkPolyData',)
def set_input(self, idx, inputStream):
self._glyph3d.SetInput(inputStream)
def get_output_descriptions(self):
return ('Glyphs (vtkPolyData)', )
def get_output(self, idx):
return self._glyph3d.GetOutput()
def logic_to_config(self):
self._config.scaling = self._glyph3d.GetScaling()
self._config.scalemode = self._glyph3d.GetScaleMode()
def config_to_logic(self):
self._glyph3d.SetScaling(self._config.scaling)
self._glyph3d.SetScaleMode(self._config.scalemode)
def execute_module(self):
self._glyph3d.Update()
|
chrisidefix/devide
|
modules/user/opticflow/MyGlyph3D.py
|
Python
|
bsd-3-clause
| 2,484
|
[
"VTK"
] |
ab2783d3559e939edff7639d96b90c42eddd37b3a70f4cbf89c06f3f49769f35
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from sklearn import svm
## Machine Learning Online Class - Exercise 6: Support Vector Machines
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# linear exercise. You will need to complete the following functions
# in this exericse:
#
# gaussianKernel
# dataset3Params
#
# ==================== All function declaration ====================
def plotData(X, y):
pos = np.where(y==1)[0]
neg = np.where(y==0)[0]
plt.plot(X[pos, 0], X[pos, 1], 'b+', label='Training data')
plt.plot(X[neg, 0], X[neg, 1], 'ro', label='Training data')
plt.legend()
def visualizeBoundaryLinear(X, y, model):
w = model.coef_[0]
b = model.intercept_
xp = np.linspace(np.min(X[:,0]), np.max(X[:,0]), 100)
yp = -(w[0] * xp + b) / w[1] # Boundary is at y=0
plt.plot(xp, yp, 'b-', label='Decision boundary')
plt.legend()
def visualizeBoundary(X, y, model):
x1vals = np.linspace(np.min(X[:,0]), np.max(X[:,0]), 100)
x2vals = np.linspace(np.min(X[:,1]), np.max(X[:,1]), 100)
xv, yv = np.meshgrid(x1vals, x2vals)
zv = model.predict(np.c_[xv.ravel(), yv.ravel()])
zv = zv.reshape(xv.shape)
plt.contour(xv, yv, zv, [0,0], colors='blue', label='Decision Boundary')
plt.legend()
def sklearnGaussianKernel(X, Y, sigma):
m = X.shape[0]
n = Y.shape[0]
gram_matrix = np.zeros((m,n))
for i in xrange(m):
for j in xrange(n):
gram_matrix[i, j] = gaussianKernel(X[i,:], Y[j,:], sigma)
return gram_matrix
def gaussianKernel(x1, x2, sigma):
sim = 0
# ============= YOUR CODE HERE =============
# Instructions: Fill in this function to return the similarity between x1
# and x2 computed using a Gaussian kernel with bandwidth
# sigma
dis = np.sum(np.power(x1-x2, 2))
sim = np.exp(-dis/(2*np.power(sigma, 2)))
# ===========================================
return np.array([[sim]])
def dataset3Params(X, y, Xval, yval):
C = 1
sigma = 0.3
# ============= YOUR CODE HERE =============
# Instructions: Fill in this function to return the optimal C and sigma
# learning parameters found using the cross validation set.
# You can use model.predict to predict the labels on the cross
# validation set.
test = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
max_score = 0
for i in xrange(8):
for j in xrange(8):
tC = test[i]
tSigma = test[j]
model = svm.SVC(C=tC, kernel='rbf', gamma=1.0/tSigma, max_iter=200)
model.fit(X, y)
score = model.score(Xval, yval)
print(score)
if score > max_score:
max_score = score
C = tC
sigma = tSigma
# ===========================================
return C, sigma
if __name__ == "__main__":
plt.close('all')
plt.ion() # interactive mode
# ==================== Part 1: Loading and Visualizing Data ====================
print('Loading and Visualizing Data ...')
data_file = '../../data/ex6/ex6data1.mat'
mat_content = sio.loadmat(data_file)
X = mat_content['X']
y = mat_content['y']
m, n = X.shape
plt.figure()
plotData(X, y)
raw_input('Program paused. Press enter to continue')
# =================== Part 2: Training Linear SVM ===================
C = 1
model = svm.SVC(C=C, kernel='linear', max_iter=20)
model.fit(X, y)
visualizeBoundaryLinear(X, y, model)
raw_input('Program paused. Press enter to continue')
# =================== Part 3: Implementing Gaussian Kernel ===================
print('Evaluating the Gaussian Kernel ...')
x1 = np.array([1, 2, 1], dtype='f')
x2 = np.array([0, 4, -1], dtype='f')
sigma = 2
sim = gaussianKernel(x1, x2, sigma)
    print('Gaussian Kernel between x1 = [1; 2; 1], x2 = [0; 4; -1], sigma = 2 : %f\n(this value should be about 0.324652)' % sim)
raw_input('Program paused. Press enter to continue')
# =================== Part 4: Visualizing Dataset 2 ===================
print('Loading and Visualizing Data ...')
data_file = '../../data/ex6/ex6data2.mat'
mat_content = sio.loadmat(data_file)
X = mat_content['X']
y = mat_content['y']
m, n = X.shape
plt.figure()
plotData(X, y)
raw_input('Program paused. Press enter to continue')
# =================== Part 5: Training SVM with RBF Kernel (Dataset 2) ===================
print('Training SVM with RBF Kernel (this may take 1 to 2 minutes) ...')
C = 1.0
sigma = 0.1
# Use kernel function, but will be very slow
#kernel_func = lambda X, Y: sklearnGaussianKernel(X, Y, sigma)
#model = svm.SVC(C=C, kernel=kernel_func, max_iter=200)
# Use libSVM's RBF kernel
model = svm.SVC(C=C, kernel='rbf', gamma=1/sigma, max_iter=200)
model.fit(X, y)
print('Finish training, now draw contour decision boundary')
visualizeBoundary(X, y, model)
raw_input('Program paused. Press enter to continue')
# =================== Part 6: Visualizing Dataset 3 ===================
print('Loading and Visualizing Data ...')
data_file = '../../data/ex6/ex6data3.mat'
mat_content = sio.loadmat(data_file)
X = mat_content['X']
y = mat_content['y']
Xval = mat_content['Xval']
yval = mat_content['yval']
m, n = X.shape
plt.figure()
plotData(X, y)
raw_input('Program paused. Press enter to continue')
# =================== Part 7: Training SVM with RBF Kernel (Dataset 3 ===================
C, sigma = dataset3Params(X, y, Xval, yval)
print('C found is = %f' % C)
print('sigma found is = %f' % sigma)
# Use kernel function, but will be very slow
#kernel_func = lambda X, Y: sklearnGaussianKernel(X, Y, sigma)
#model = svm.SVC(C=C, kernel=kernel_func, max_iter=20, verbose=True)
# Use libSVM's RBF kernel
model = svm.SVC(C=C, kernel='rbf', gamma=1/sigma, max_iter=200)
model.fit(X, y)
visualizeBoundary(X, y, model)
raw_input('Program paused. Press enter to continue')
plt.close('all')
|
cameronlai/ml-class-python
|
solutions/ex6/ex6.py
|
Python
|
mit
| 6,354
|
[
"Gaussian"
] |
a5b38baaffb86c39b51c86c023cd4da5e4390296ae538ad71956b407afd2d7ac
|
# coding: utf-8
# Adam Petrone
# March 2014
import os
import re
import logging
from pegasus.commands import *
from pegasus.models import (
FileGroup,
FileWriter,
DefaultDict,
Driver,
Path,
ProductType,
VariableContext,
make_paths_relative_to_project)
from pegasus.core import Architecture
from pegasus.util import (
make_path_absolute,
get_working_directory
)
from pegasus.drivers.libxcode.scheme import write_xcode_scheme
from pegasus.drivers.libxcode.workspace import write_xcode_workspace
from pegasus.drivers.libxcode.pbx import *
#
# Support classes
# June 2016
# Adding support for FileGroups.
# March 2015
# It has been exactly one year. This code is a complete and utter mess
# of prototype. This needs to be refactored into something sane.
# August 2015
# Starting with Xcode7, TBD files have been introduced.
# These are text-based stub libraries that provide a compact version
# of the stub libraries in the SDK.
# It looks like these should just be added as flags to the libtool
# instead of directly adding the dylib to the project.
#
# -fembed-bitcode-marker is added when 'ENABLE_BITCODE' is set on the
# project. However, when archiving, '-fembed-bitcode' is used instead.
#
class XcodeArchitectureMap(object):
def __init__(self, architecture):
if architecture == None:
raise Exception("architecture is None. Must be a valid architecture string!")
self.arch = architecture
self.arch_map = DefaultDict(None)
self.arch_map[Architecture.x86] = "i386"
self.arch_map[Architecture.arm64] = "arm64"
def __str__(self):
arch = self.arch_map[self.arch]
if not arch:
return str(self.arch)
return str(arch)
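# For example, str(XcodeArchitectureMap(Architecture.x86)) yields "i386", while any
# architecture without an entry in arch_map falls back to its original string form.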
class XcodeFileReferenceData(object):
def __init__(self, **kwargs):
self.pattern = kwargs.get("pattern", None)
self.is_source = kwargs.get("build", False)
self.file_class = kwargs.get("file_class", None)
self.is_framework = kwargs.get("is_framework", None)
self.is_resource = kwargs.get("is_resource", None)
self.is_variant = kwargs.get("is_variant", False)
self.is_directory = kwargs.get("is_directory", False)
def get_sort_key(item):
if isinstance(item, FileGroup):
return item.name.lower()
else:
return item.lower()
#
# Driver
class Xcode(Driver):
def __init__(self, requested_version):
super(Xcode, self).__init__()
self.driver_schema = None
# This is kind of nasty, but we have to maintain some state
# across project generations so we can connect interdependencies
# correctly.
self.product_to_xcodeproject = {}
def add_project_by_name(self, name, project):
self.product_to_xcodeproject[name] = project
def get_project_by_name(self, name):
try:
return self.product_to_xcodeproject[name]
except:
logging.info("'%s' is not in the product-to-project dict" % name)
raise
def post_load_driver_schema(self, schema):
logging.info("loading driver schema")
self.driver_schema = schema["xcode"]
def is_platform_compatible(self, host_platform_name, platform_string):
return host_platform_name == "macosx"
def prebuild_commands_for_dependency(self, architecture, configuration, target_platform, product_context, dependency_context):
prebuild_commands = []
dc = VariableContext()
dc.inherit(dependency_context)
dc.update(architecture="${CURRENT_ARCH}")
dc.dependency_product_root = dependency_context.get("abs_product_root", expand=False)
tc = VariableContext()
tc.inherit(product_context)
tc.update(architecture="${CURRENT_ARCH}")
tc.toplevel_product_root = product_context.get("abs_product_root", expand=False)
dependency_path = os.path.join(dc.dependency_product_root, target_platform.get_full_product_path(dependency_context.product))
#logging.info("dependency_path = %s" % dependency_path)
output_root = os.path.dirname(os.path.join(tc.toplevel_product_root, target_platform.get_full_product_path(product_context.product)))
#logging.info("output_root = %s" % output_root)
product = dependency_context.product
if product.output == ProductType.DynamicLibrary:
# 1. Must copy the dylib to application's bundle path
# 2. Must run install_name_tool to fix rpath
frameworks_path = Path.absolute(os.path.join(output_root, os.path.pardir, "Frameworks"))
#logging.info("frameworks_path = %s" % frameworks_path)
# Assume this was already created.
md = Makedirs(path=frameworks_path)
prebuild_commands.append(md.commandline())
# copy the dynamic library to the Frameworks path for the toplevel product.
cp = Copy(src=dependency_path, dst=frameworks_path)
prebuild_commands.append(cp.commandline())
dependency_basename = os.path.basename(dependency_path)
new_output_path = os.path.join(frameworks_path, dependency_basename)
#logging.info("new_output_path: %s" % new_output_path)
#else:
# logging.info("Unknown product: %s, %s" % (product.output, product.name))
#logging.info("COMMANDS: ")
#logging.info("\n".join(prebuild_commands))
return prebuild_commands
def cleanse_commands(self, commandlist):
output = []
for item in commandlist:
output.append(item.replace("\"", "\\\""))
return output
def split_compiler_flags(self, flags):
# everything that starts with -W will be a 'warning_cflag'
warning_cflags = []
# everything else will have to go here
other_cflags = []
for option in flags:
if option.startswith("-W"):
warning_cflags.append(option)
else:
other_cflags.append(option)
return (other_cflags, warning_cflags)
def generate_workspace(self, executor, filemanager, relative_project_names, workspace_path):
workspace_contents = "contents.xcworkspacedata"
# make dirs
filemanager.makedirs(workspace_path)
# append contents name onto path and create the file
workspace_filename = os.path.join(workspace_path, workspace_contents)
workspace_file = filemanager.create(workspace_filename)
# write this out to disk given the relative_project_names we generated above.
write_xcode_workspace(workspace_file, relative_project_names)
def organize_sources(self,
sorted_source_list,
context,
group_root,
group_map,
groups,
frameworksReference,
resourcesReference,
sourcesReference,
file_references,
file_reference_section,
build_file_section,
source_references,
resources_references,
frameworks_group,
frameworks_build_phase):
for relative_file_path in sorted_source_list:
is_absolute = False
absolute_file_path = None
if isinstance(relative_file_path, FileGroup):
self.organize_sources(relative_file_path.sources,
context,
group_root,
group_map,
groups,
frameworksReference,
resourcesReference,
sourcesReference,
file_references,
file_reference_section,
build_file_section,
source_references,
resources_references,
frameworks_group,
frameworks_build_phase)
continue
# this is an absolute path to a file
if relative_file_path[0] == os.path.sep:
is_absolute = True
absolute_file_path = relative_file_path
else:
absolute_file_path = make_path_absolute(context, relative_file_path)
raw_pieces = relative_file_path.split(os.path.sep)
# loop through the directories
total_pieces = len(raw_pieces)
group_name = None
group_ref = None
full_path = ""
pieces = []
parent_ids = []
if not is_absolute:
# pre-process raw_pieces to combine multiple parent directory references
# into a single entry with the first non-parent ref name
# Example: ["..", "..", "src"] becomes ["../../src"]
for x in xrange(0, total_pieces):
path = ""
if raw_pieces[x] == PARENT_DIR:
parent_ids.append(x)
continue
else:
for z in parent_ids:
path = os.path.join(path, raw_pieces[z])
parent_ids = []
path = os.path.join(path, raw_pieces[x])
pieces.append(path)
total_pieces = len(pieces)
for x in xrange(0,total_pieces-1):
group_name = pieces[x]
group_path = group_name
if PARENT_DIR in group_name:
group_name = os.path.basename(group_name)
#logging.info("group_name is: %s" % group_name)
full_path = os.path.join(full_path, group_name)
#logging.info("group_name: %s, group_path: %s" % (group_name, group_path))
if not group_map.has_key(full_path):
group_map[full_path] = {}
prev_group = group_ref
#logging.info("full_name = %s, relative_file_path: %s" % (full_path, relative_file_path))
if not group_name in group_map[full_path]:
assert(group_name)
#logging.info("adding group (name=%s, path=%s)" % (group_name, group_path))
group_ref = self.create_group(name=group_name, path=group_path)
group_map[full_path][group_name] = group_ref
groups.children.append(group_ref)
if prev_group:
prev_group.children().append(group_ref.key)
group_ref = group_map[full_path][group_name]
if not group_ref:
# If no group_ref exists; it's likely the file is at the root level.
group_ref = group_root
# Each file needs a PBXBuildFile and PBXFileReference
# the PBXBuildFile is referenced by the associated Build Phase.
# Files are organized inside of Xcode using Groups --
# so we'll create a group at each level of the file hierarchy.
# For a file: "src/core/memory.cpp" -- create the two groups "src", and "core".
# The PBXBuildFile reference for "memory.cpp" is a child of "core", and "core" is a child of "src".
# Fetch properties based on the relative_file_path.
file_data = self.determine_filereference_class(relative_file_path, absolute_file_path)
# default build_phase
build_phase = sourcesReference
# NOTE: just grab the basename if it contains PARENT_DIR. This matches the behavior in the loop above
if not is_absolute:
if PARENT_DIR in pieces[-1]:
fileref_name = os.path.basename(pieces[-1])
else:
fileref_name = pieces[-1]
fileref_path = pieces[-1]
else:
fileref_name = os.path.basename(relative_file_path)
fileref_path = relative_file_path
sourceTree = ABSOLUTE if is_absolute else RELATIVE_TO_GROUP
link_framework = None
if file_data.is_framework:
# Frameworks Build Phase
build_phase = frameworksReference
fileref_path, sourceTree, link_framework = self.determine_framework_props(relative_file_path)
# Create a PBXFileReference and add the file_ref to the section.
if fileref_name in file_references:
file_ref = file_references[fileref_name]
else:
file_ref = PBXFileReference(file_type=file_data.file_class, name=fileref_name, path=fileref_path, source_tree=sourceTree)
file_reference_section.children.append(file_ref)
file_references[fileref_name] = file_ref
if file_data.is_resource and not file_data.is_directory:
build_phase = resourcesReference
# Source files must be added to the build file section.
# Frameworks fall under this category.
if file_data.is_source:
build_ref = PBXBuildFile(build_phase=build_phase, file_ref=file_ref.reference)
build_file_section.children.append(build_ref)
if not file_data.is_framework and not file_data.is_resource:
source_references.append(build_ref.reference)
elif link_framework:
frameworks_build_phase.files().append(build_ref.reference)
# Resources are added to the Copy Build Phase...
# Unless they are Asset Catalogs or Folder references (like .xcassets)
if file_data.is_resource:
resources_references.append(build_ref.reference)
# Place this into the group, unless it's a Framework.
# Frameworks are placed into a separate "Frameworks" group in the Project Hierarchy.
if not file_data.is_framework:
group_ref.children().append(file_ref.reference)
else:
frameworks_group.add_child(file_ref.reference)
def generate(self, **kwargs):
executor = kwargs.get("executor", None)
buildfile = kwargs.get("buildfile", None)
filemanager = kwargs.get("filemanager", None)
target_platform = kwargs.get("target_platform", None)
context_list = kwargs.get("context_list", None)
# Key value pair relating the product name to target ref.
# This is used when generating schemes so they can setup dependencies.
target_ref_dict = {}
for context in context_list:
product = context.product
product_name = product.name
output = product.output
# Certain attributes must be specified per project (instead of for each configuration/architecture).
# This is a limitation of how Xcode handles these settings. We can work around this by using
# the first params instance in the list.
#
main_params = context.params_list[0]
architectures, configurations = buildfile.get_layout_params(product)
sources = make_paths_relative_to_project(context, main_params.sources)
resources = make_paths_relative_to_project(context, main_params.resources)
project_name = self.get_project_name(product)
project_path = os.path.join(context.abs_project_root, project_name)
pbxproj_path = os.path.join(project_path, "project.pbxproj")
# open a file
handle = filemanager.create(pbxproj_path)
# instance a new FileWriter and open the path
writer = FileWriter(handle)
scheme_path = os.path.join(project_path, "xcshareddata", "xcschemes")
# ensure this exists
filemanager.makedirs(scheme_path)
# setup instances for project-wide settings
ms = MainSection()
prebuildReference = PBXReference(comment="Prebuild")
postbuildReference = PBXReference(comment="Postbuild")
resourcesReference = PBXReference(comment="Resources")
sourcesReference = PBXReference(comment="Sources")
frameworksReference = PBXReference(comment="Frameworks")
buildPhasesList = []
# compile a list of frameworks. These also need to be in the BuildFile and FileReference sections.
frameworks = []
# ...
local_ldflags = []
# a list of dynamic libraries
libraries = []
prebuild_commands = main_params.prebuild_commands
postbuild_commands = main_params.postbuild_commands
frameworks = main_params.driver.frameworks
libraries = main_params.driver.libraries
local_ldflags = main_params.driver.linkflags
# setup main section
ms.children.append(IntValue("archiveVersion", 1))
ms.children.append(ArrayKeyValue("classes", []))
ms.children.append(IntValue("objectVersion", 46))
# setup frameworks build phase
frameworks_build_phase = PBXFrameworksBuildPhaseSection()
frameworks_build_phase.frameworks_reference = frameworksReference
# all objects
objects = []
build_file_section = PBXBuildFileSection()
container_item_proxy = PBXContainerItemProxySection()
file_reference_section = PBXFileReferenceSection()
# file reference dictionary; prevents duplicate file references from being added
file_references = {}
# files that are compiled in some way
source_references = []
# files that are added to the resources build phase
resources_references = []
# variant groups that are added to the PBXVariantGroup section
variant_groups = []
# framework file references
framework_file_references = []
group_map = {}
groups = PBXGroupSection()
reference_proxies = []
dependent_project_root_items = []
project_group_ref_items = []
target_dependencies = []
for instance in context.ordered_dependencies:
xcodeproject_name = "%s.xcodeproj" % instance.name
xcp = self.get_project_by_name(instance.name)
# create a PBXFileReference for the xcodeproject
file_data = self.determine_filereference_class(xcodeproject_name)
project_file_ref = PBXFileReference(file_type=file_data.file_class, path=xcodeproject_name, source_tree=RELATIVE_TO_GROUP)
file_reference_section.children.append(project_file_ref)
file_references[xcodeproject_name] = project_file_ref
dependent_project_root_items.append(project_file_ref.reference)
# create a container proxy and add it to the section
container_proxy = PBXContainerItemProxy(
container_portal=project_file_ref.reference,
proxy_type=PBXContainerItemProxy.ProxyType.TARGET_REFERENCE,
remote_global_id=xcp.target_reference.value,
remote_info=instance.name
)
target_dependency_proxy = PBXContainerItemProxy(
container_portal=project_file_ref.reference,
proxy_type=PBXContainerItemProxy.ProxyType.PRODUCT_REFERENCE,
remote_global_id=xcp.product_reference.value,
remote_info=instance.name
)
reference_proxy = PBXReferenceProxy(path=("lib%s.a" % instance.name), remote_ref=target_dependency_proxy.reference)
reference_proxies.append(reference_proxy)
target_dependency = PBXTargetDependency(name=instance.name, target_proxy=container_proxy.reference)
target_dependencies.append(target_dependency)
data = {
"name": instance.name,
"container_portal": project_file_ref.reference,
"reference_proxy": reference_proxy
}
project_group_ref_items.append(data)
# this is so they're added in the order that Xcode normally adds them in: proxyType 2 then 1.
container_item_proxy.children.append(target_dependency_proxy)
container_item_proxy.children.append(container_proxy)
# The main project also needs a PBXBuildFile reference pointing to the
# dependent library.
# This gets referenced in the frameworks build phase.
library_buildfile = PBXBuildFile(build_phase=frameworksReference, file_ref=reference_proxy.reference)
frameworks_build_phase.files().append(library_buildfile.reference)
build_file_section.children.append(library_buildfile)
# logging.info([d.name for d in context.ordered_dependencies])
# these groups end up in the project file view in an Xcode project.
libraries_group = self.create_group(name="Libraries", children=[], source_tree=RELATIVE_TO_GROUP)
frameworks_group = self.create_group(name="Frameworks", children=[], source_tree=RELATIVE_TO_GROUP)
group_root = self.create_group(name=product_name, children=[libraries_group.key, frameworks_group.key], source_tree=RELATIVE_TO_GROUP)
# process library files (these are to be placed into the project's "Libraries" folder and linked in the Frameworks Build Phase)
for library_path in libraries:
base_name = os.path.basename(library_path)
dir_name = os.path.dirname(library_path)
file_data = self.determine_filereference_class(base_name)
if base_name not in file_references:
file_ref = PBXFileReference(file_type=file_data.file_class, name=base_name, path=library_path, source_tree=RELATIVE_TO_GROUP)
file_reference_section.children.append(file_ref)
file_references[base_name] = file_ref
else:
file_ref = file_references[base_name]
build_ref = PBXBuildFile(build_phase=frameworksReference, file_ref=file_ref.reference)
build_file_section.children.append(build_ref)
frameworks_build_phase.files().append(build_ref.reference)
libraries_group.children().append(file_ref.reference)
# build all file references
# This creates PBXBuildFiles, PBXFileReferences, and organizes them in the Project tree within Xcode.
# Frameworks, Dynamic Libraries, and Resources are also in this mix. They are typically added to a different build phase.
# This should handle relative paths by combining ".." strings appropriately.
#
sorted_source_list = sorted(sources+frameworks+resources, key=get_sort_key)
self.organize_sources(sorted_source_list,
context,
group_root,
group_map,
groups,
frameworksReference,
resourcesReference,
sourcesReference,
file_references,
file_reference_section,
build_file_section,
source_references,
resources_references,
frameworks_group,
frameworks_build_phase)
# On Adding a new group..
# Create a new group (self.create_group)
# Populate with children
# Add new group to the groups.children
# Insert the Frameworks reference somewhere in the application children
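# A minimal sketch of those steps using names from this module (file_ref is assumed
# to be an existing PBXFileReference; "Shaders" is just an illustrative group name):
#   new_group = self.create_group(name="Shaders", children=[], source_tree=RELATIVE_TO_GROUP)
#   new_group.children().append(file_ref.reference)
#   groups.children.append(new_group)
#   group_root.children().append(new_group.key)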
# If this is a DynamicLibrary or Bundle, a postbuild step is added to update the install_name to reflect
# the relative path of the product.
if product.output == ProductType.DynamicLibrary or product.output == ProductType.MacOSX_Bundle:
context.update(architecture="<VAR>{CURRENT_ARCH}", configuration="<VAR>{CONFIGURATION}")
# get the full path to this product
full_path = os.path.join(context.product_root, target_platform.get_full_product_path(product))
# then the relative path...
output_path = os.path.relpath(full_path, main_params.context.product.project_root)
# find the install name for the Root Product
install_name_format = target_platform.get_product_install_name(context.product)
install_name = install_name_format % target_platform.get_full_product_name(product)
install_name_tool = InstallNameTool(command="id", name=install_name, input=output_path)
#logging.info(install_name_tool.commandline())
postbuild_commands.append(install_name_tool.commandline())
# retrieve the product data for the output type
product_data = self.get_product_data(output)
explicit_file_type = product_data["file_type"]
extension = product_data["extension"]
applicationBundleName = self.get_bundle_name(product)
appFileReference = PBXReference(comment=applicationBundleName)
frprops = [
IdValue("isa", "PBXFileReference"),
IdValue("explicitFileType", explicit_file_type),
IntValue("includeInIndex", 0),
StringValue("name", applicationBundleName),
StringValue("path", applicationBundleName),
IdValue("sourceTree", "BUILT_PRODUCTS_DIR")
]
fr = ArrayKeyValue(appFileReference, frprops)
file_reference_section.children.append(fr)
# PBXBuildFile
objects.append(build_file_section)
# PBXContainerItemProxy
objects.append(container_item_proxy)
# PBXFileReference
objects.append(file_reference_section)
#
# add frameworks builds phase
objects.append(frameworks_build_phase)
# root level references
group_items = []
for key, value in group_map.iteritems():
if not "/" in key:
group_items.append(value[key].key)
# setup the root reference:
# add top level groups to this.
group_root.children().extend(group_items)
group_root.children().extend(dependent_project_root_items)
# add Product reference
products = self.create_group(name="Products", children=[appFileReference], source_tree=RELATIVE_TO_GROUP)
groups.children.append(group_root)
if libraries_group.children():
groups.children.append(libraries_group)
if frameworks_group.children():
groups.children.append(frameworks_group)
groups.children.append(products)
group_root.children().append(products.key)
# loop through container refs and add a product group for them
objects.append(groups)
nativeTargetConfiguration = PBXReference(comment="Build configuration list for PBXNativeTarget \"%s\"" % product_name)
#
# The list must maintain the ordering of the references.
# 1. Copy resources
buildPhasesList.append(resourcesReference)
# 2. (if exists) Prebuild commands
if prebuild_commands:
buildPhasesList.append(prebuildReference)
# 3. Sources (compile)
buildPhasesList.append(sourcesReference)
# 4. Link with Frameworks
buildPhasesList.append(frameworksReference)
# 5. (if exists) Postbuild commands
if postbuild_commands:
buildPhasesList.append(postbuildReference)
#
# Setup the target reference
targetRef = PBXReference(comment=product_name)
targetRefProps = [
IdValue("isa", "PBXNativeTarget"),
IdValue("buildConfigurationList", nativeTargetConfiguration),
ListValue("buildPhases", buildPhasesList),
ListValue("buildRules", []),
ListValue("dependencies", [dep.reference for dep in target_dependencies]),
IdValue("name", product_name), # was StringValue, but Xcode changes these to IdValues
StringValue("productInstallPath", "$(HOME)/Applications"),
IdValue("productName", product_name), # was StringValue, but Xcode changes these to IdValues
IdValue("productReference", appFileReference),
StringValue("productType", product_data["type"])
]
target = ArrayKeyValue(targetRef, targetRefProps)
target_ref_dict[product_name] = targetRef
nativetargets = PBXNativeTargetSection()
nativetargets.children.append(target)
objects.append(nativetargets)
xcodeproj = XcodeProject(product=product, product_reference=appFileReference, target_reference=targetRef)
self.add_project_by_name(product.name, xcodeproj)
projectBuildConfigurationList = PBXReference(comment="Build configuration list for PBXProject \"%s\"" % product_name)
project_ref_list = []
for data in project_group_ref_items:
#product_group = create_group
product_group = self.create_group(name="Products", children=[data["reference_proxy"].reference], source_tree=RELATIVE_TO_GROUP)
groups.children.append(product_group)
project_ref_items = [
IdValue("ProductGroup", product_group.key),
IdValue("ProjectRef", data["container_portal"])
]
# construct root items
project_ref_list.append(ArrayValue(None, project_ref_items))
# this later becomes our rootObject.
projectRef = PBXReference(comment="Project object")
projectProperties = [
IdValue("isa", "PBXProject"),
IdValue("attributes", ArrayValue(None, [IdValue("LastUpgradeCheck", LAST_UPGRADE_CHECK)])), # new in Xcode 5
IdValue("buildConfigurationList", projectBuildConfigurationList),
StringValue("compatibilityVersion", "Xcode 3.2"),
IdValue("developmentRegion", "English"), # new in Xcode 5
IntValue("hasScannedForEncodings", 1),
ListValue("knownRegions", ["en"]), # new in Xcode 5
IdValue("mainGroup", group_root.key), # PBXGroup reference
StringValue("projectDirPath", ""),
StringValue("projectRoot", ""),
IdValue("productRefGroup", products.key),
ListValue("targets", [targetRef]), # Native Target refs
#IdValue("productRefGroup", products)
ListValue("projectReferences", project_ref_list)
]
project = PBXProjectSection()
project.children.append(ArrayKeyValue(projectRef, projectProperties))
objects.append(project)
referenceProxySection = PBXReferenceProxySection()
referenceProxySection.children.extend(reference_proxies)
objects.append(referenceProxySection)
resourcesProperties = [
IdValue("isa", "PBXResourcesBuildPhase"),
IntValue("buildActionMask", 2147483647),
ListValue("files", resources_references),
IntValue("runOnlyForDeploymentPostprocessing", 0)
]
resourcePhase = ArrayKeyValue(resourcesReference, resourcesProperties)
resources = PBXResourcesBuildPhaseSection()
resources.children.append(resourcePhase)
objects.append(resources)
if prebuild_commands:
prebuild = PBXShellScriptBuildPhaseSection()
processed_commands = self.cleanse_commands(prebuild_commands)
properties = [
IdValue("isa", "PBXShellScriptBuildPhase"),
IntValue("buildActionMask", 2147483647),
ListValue("files", []),
ListValue("inputPaths", []),
IdValue("name", "Prebuild"),
ListValue("outputPaths", []),
IntValue("runOnlyForDeploymentPostprocessing", 0),
IdValue("shellPath", "/bin/sh"),
StringValue("shellScript", "\n".join(processed_commands))
]
prebuild.children.append(ArrayKeyValue(prebuildReference, properties))
objects.append(prebuild)
sourcesProperties = [
IdValue("isa", "PBXSourcesBuildPhase"),
IntValue("buildActionMask", 2147483647),
ListValue("files", source_references),
IntValue("runOnlyForDeploymentPostprocessing", 0),
]
sources = PBXSourcesBuildPhaseSection()
sources.children.append(ArrayKeyValue(sourcesReference, sourcesProperties))
objects.append(sources)
# Better yet:
#sources = PBXSourcesBuildPhase(files=source_references)
#source_section = PBXSourcesBuildPhaseSection(sourcesReference, sources)
target_dependency_section = PBXTargetDependencySection()
target_dependency_section.children.extend(target_dependencies)
objects.append(target_dependency_section)
if postbuild_commands:
processed_commands = self.cleanse_commands(postbuild_commands)
postbuild = PBXShellScriptBuildPhaseSection()
properties = [
IdValue("isa", "PBXShellScriptBuildPhase"),
IntValue("buildActionMask", 2147483647),
ListValue("files", []),
ListValue("inputPaths", []),
IdValue("name", "Postbuild"),
ListValue("outputPaths", []),
IntValue("runOnlyForDeploymentPostprocessing", 0),
IdValue("shellPath", "/bin/sh"),
StringValue("shellScript", "\n".join(processed_commands))
]
postbuild.children.append(ArrayKeyValue(postbuildReference, properties))
objects.append(postbuild)
variant = PBXVariantGroupSection()
variant.children = variant_groups
objects.append(variant)
build_configuration_section = XCBuildConfigurationSection()
objects.append(build_configuration_section)
# Per Layout variables
target_configurations = []
product_configurations = []
for configuration in configurations:
target_configuration_reference = PBXReference(comment=configuration)
product_configuration_reference = PBXReference(comment=configuration)
build_settings = []
for architecture in architectures:
#product = base_product.target(configuration=configuration, architecture=architecture)
params = context.get_layout(architecture, configuration)
local_context = params.context
product = local_context.product
# Intentionally omit the architecture from the context update call.
# Since this supports a universal binary, we don't specify a single architecture.
# The variable replacement for "architecture" must be overridden, so we specify the full list here.
# This allows the context's logic to turn it into something generic, because MacOS prefers fat binaries
# and doesn't allow per-architecture overrides for all of its project settings (product/obj dirs).
#context.update(product=product, architecture=architectures, configuration=configuration)
arch_map = XcodeArchitectureMap(architecture)
subkey = "arch=%s" % arch_map
#valid_archs = [self.arch_map[x] if self.arch_map.has_key(x) else x for x in architectures]
valid_archs = [str(XcodeArchitectureMap(arch)) for arch in architectures]
generic_settings = []
arch_is_non_standard = False
arch = "$(ARCHS_STANDARD)"
#logging.info("TODO: move these to MacOS X target platform!")
if target_platform.matches("macosx"):
if "i386" in valid_archs and "x86_64" in valid_archs:
arch = "$(ARCHS_STANDARD_32_64_BIT)"
arch_is_non_standard = True
elif len(valid_archs) == 1 and valid_archs[0] == "i386":
arch = "$(ARCHS_STANDARD_32_BIT)"
arch_is_non_standard = True
else:
# We filter out this architecture (which doesn't make an explicit appearance)
# And replace the subkey with "Any iOS Simulator SDK"
if architecture == Architecture.x86:
subkey = "sdk=iphonesimulator*"
# ARCHS: architectures to which the binary is targeted
if arch_is_non_standard:
generic_settings.append(StringValue("ARCHS", arch))
# VALID_ARCHS: architectures for which the binary may be built
#logging.info("TODO: move to macosx TargetPlatform")
if target_platform.matches("macosx"):
# remove i386 from the list
if "i386" in valid_archs:
valid_archs.remove("i386")
# this is now setup to use the LIST, which will default to a generic folder
# path, if it contains more than one item.
local_context.update(architecture=architectures)
generic_settings.append(StringValue("VALID_ARCHS", " ".join(valid_archs)))
generic_settings.append(StringValue("PRODUCT_NAME", product_name))
generic_settings.append(StringValue("CONFIGURATION_BUILD_DIR", local_context.abs_product_root))
if params.object_root:
# The project is created with a relative path to the object root.
object_root = os.path.join(context.project_to_base_relative, local_context.object_root)
generic_settings.append(StringValue("OBJROOT", object_root))
generic_settings.append(StringValue("CONFIGURATION_TEMP_DIR", object_root))
generic_settings.append(StringValue("SYMROOT", object_root))
# restore back to a single architecture
local_context.update(architecture=architecture)
# preprocess the links attribute
other_ldflags = params.linkflags[:]
other_cflags, warning_cflags = self.split_compiler_flags(params.cflags[:])
# GCC_PREPROCESSOR_DEFINITIONS takes care of this list
#if product.defines:
#other_cflags += [("-D%s" % x) for x in product.defines]
other_cplusplusflags = params.cxxflags[:]
# These have to be added as a PBXBuildFile and PBXFileReference.
# The PBXBuildFile is added to a Frameworks group in the PBXFrameworksBuildPhase.
# That Group is referenced by the PBXNativeTarget.
# The PBXFileReference is added to PBXGroup "Frameworks".
other_ldflags = params.driver.linkflags
#local_frameworks = params.driver.frameworks
#local_libraries = params.driver.libraries
if other_ldflags:
ldflags = [StringValue(x, None, key_only=True) for x in other_ldflags]
build_settings.append(ListValue("OTHER_LDFLAGS", ldflags, subkey=subkey))
if params.defines:
defines = [StringValue(x, None, key_only=True) for x in params.defines]
build_settings.append(ListValue("GCC_PREPROCESSOR_DEFINITIONS", defines, subkey=subkey))
includes = make_paths_relative_to_project(context, params.includes)
if includes:
include_paths = []
for path in includes:
include_paths.append(StringValue(path, None, key_only=True))
build_settings.append(ListValue("HEADER_SEARCH_PATHS", include_paths, subkey=subkey))
# "MACH_O_TYPE": ["mh_executable", "mh_dylib", "mh_bundle", "staticlib", "mh_object"]
# "DEBUG_INFORMATION_FORMAT" : ["dwarf", "dwarf-with-dsym"]
#libdirs = dependencies.libdirs + (product.libdirs if product.libdirs else [])
if params.libdirs:
lib_paths = []
for path in params.libdirs:
lib_paths.append(StringValue(path, None, key_only=True))
build_settings.append(ListValue("LIBRARY_SEARCH_PATHS", lib_paths, subkey=subkey))
# allow the build scripts to override default variables
for key,_ in self.driver_schema.iteritems():
value = getattr(params.driver, key, None)
if value:
#logging.info("DRIVER %s -> %s" % (key.upper(), value))
build_settings.append(StringValue(key.upper(), value))
#logging.info("TODO: Move CODE_SIGN_IDENTITY to platform: iphoneos")
if target_platform.matches("iphoneos") or target_platform.matches("iphonesimulator"):
build_settings.append(StringValue("CODE_SIGN_IDENTITY", "iPhone Developer"))
# extra build settings for frameworks
# FRAMEWORK_VERSION = A
# DYLIB_COMPATIBILITY_VERSION = 1
# DYLIB_CURRENT_VERSION = 1
# WRAPPER_EXTENSION = framework
build_settings.append(StringValue("OTHER_CFLAGS", " ".join(other_cflags)))
build_settings.append(StringValue("OTHER_CPLUSPLUSFLAGS", " ".join(other_cplusplusflags)))
build_settings.append(StringValue("WARNING_CFLAGS", " ".join(warning_cflags)))
build_configuration = [
IdValue("isa", "XCBuildConfiguration"),
IdValue("buildSettings", ArrayValue(None, generic_settings+build_settings)),
StringValue("name", configuration)
]
target_ref = ArrayKeyValue(target_configuration_reference, build_configuration)
product_ref = ArrayKeyValue(product_configuration_reference, build_configuration)
build_configuration_section.children.append(target_ref)
build_configuration_section.children.append(product_ref)
target_configurations.append(target_configuration_reference)
product_configurations.append(product_configuration_reference)
# Generate Scheme file for configuration
scheme_format = "%(product_name)s - %(configuration)s"
scheme_vars = {
"product_name" : product_name,
"configuration" : configuration
}
scheme_filename = "%s.xcscheme" % (scheme_format % scheme_vars)
full_scheme_path = os.path.join(scheme_path, scheme_filename)
scheme_file = filemanager.create(full_scheme_path)
# Create a list of dependent references
dependent_references = []
for dependency in context.ordered_dependencies:
reference = target_ref_dict[dependency.name]
dependent_references.append((dependency, reference))
write_xcode_scheme(
self,
scheme_file,
project_name,
projectRef,
product,
configuration,
product.commandline,
dependent_references
)
configProperties = [
IdValue("isa", "XCConfigurationList"),
ListValue("buildConfigurations", target_configurations),
IntValue("defaultConfigurationIsVisible", 0),
IdValue("defaultConfigurationName", configurations[0]) # Was StringValue, but Xcode converts this to IdValue
]
targetConfigurations = ArrayKeyValue(nativeTargetConfiguration, configProperties)
configProperties = [
IdValue("isa", "XCConfigurationList"),
ListValue("buildConfigurations", product_configurations),
IntValue("defaultConfigurationIsVisible", 0),
IdValue("defaultConfigurationName", configurations[0]) # Was StringValue, but Xcode converts this to IdValue
]
projectConfigurations = ArrayKeyValue(projectBuildConfigurationList, configProperties)
# build it all together for the configuration list section
buildConfigurationList = XCConfigurationListSection()
buildConfigurationList.children.append(targetConfigurations)
buildConfigurationList.children.append(projectConfigurations)
objects.append(buildConfigurationList)
ms.children.append(ArrayKeyValue("objects", objects))
ms.children.append(IdValue("rootObject", projectRef))
ms.visit(writer)
if len(context_list) > 1:
product_data = {}
root = context_list[0].abs_root
for context in context_list:
product_data[context.product.name] = os.path.relpath(context.abs_project_root, context.abs_root)
# compile the project name list
relative_project_names = []
for key, value in product_data.iteritems():
xcodeproj_name = "%s.xcodeproj" % key
project_path = os.path.join(value, xcodeproj_name)
relative_project_names.append(project_path)
# setup name, path and filename for workspace items
workspace_name = "%s.xcworkspace" % context.buildfile.filename
workspace_path = os.path.join(root, workspace_name)
self.generate_workspace(executor, filemanager, relative_project_names, workspace_path)
def determine_framework_props(self, filename):
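# Returns (framework path under its root, source-tree constant, link flag); the flag
# tells organize_sources whether the framework should also be added to the Frameworks
# build phase (XCTest, which lives under DEVELOPER_DIR, is not).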
#logging.info("framework: %s" % filename)
SYSTEM_LIBRARY = ("System/Library/Frameworks", SDKROOT, True)
LIBRARY = ("Library/Frameworks", DEVELOPER_DIR, False)
framework_items_to_paths = DefaultDict(SYSTEM_LIBRARY)
framework_items_to_paths["XCTest.framework"] = LIBRARY
value = framework_items_to_paths[filename]
return (os.path.join(value[0], filename), value[1], value[2])
def determine_filereference_class(self, filename, absolute_path = None):
self.file_reference_list = [
# C, C++ files
XcodeFileReferenceData(pattern=".*\.c$", build=True, file_class="sourcecode.c.c", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.h$", build=False, file_class="sourcecode.c.h", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.cpp$", build=True, file_class="sourcecode.cpp.cpp", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.cc$", build=True, file_class="sourcecode.cpp.cpp", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.hpp$", build=False, file_class="text", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
# Objective-C files
XcodeFileReferenceData(pattern=".*\.m$", build=True, file_class="sourcecode.c.objc", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.mm$", build=True, file_class="sourcecode.cpp.objcpp", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
# Resource files
XcodeFileReferenceData(pattern=".*\.xcassets$", build=True, file_class="folder.assetcatalog", is_framework=False, is_resource=True, is_variant=False, is_directory=True),
XcodeFileReferenceData(pattern=".*\.strings$", build=True, file_class="text.plist.strings", is_framework=False, is_resource=True, is_variant=True, is_directory=False),
XcodeFileReferenceData(pattern=".*\.storyboard$", build=True, file_class="file.storyboard", is_framework=False, is_resource=True, is_variant=True, is_directory=False),
# OpenGL resources
XcodeFileReferenceData(pattern=".*\.fsh$", build=True, file_class="sourcecode.glsl", is_framework=False, is_resource=True, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.vsh$", build=True, file_class="sourcecode.glsl", is_framework=False, is_resource=True, is_variant=False, is_directory=False),
# MacOS non-resource files
XcodeFileReferenceData(pattern=".*\.plist$", build=False, file_class="text.plist.xml", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.xctest$", build=False, file_class="wrapper.cfbundle", is_framework=False, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.framework$", build=True, file_class="wrapper.framework", is_framework=True, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.dylib$", build=True, file_class="compiled.mach-o.dylib", is_framework=True, is_resource=False, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.xib$", build=True, file_class="file.xib", is_framework=False, is_resource=True, is_variant=False, is_directory=False),
XcodeFileReferenceData(pattern=".*\.xcodeproj$", build=False, file_class="\"wrapper.pb-project\"", is_framework=False, is_resource=True, is_variant=False, is_directory=False),
]
default_text = \
XcodeFileReferenceData(
pattern="",
build=False,
file_class="text",
is_framework=False,
is_resource=False,
is_variant=False,
is_directory=False
)
directory_reference = \
XcodeFileReferenceData(
pattern=".*\/$",
build=True,
file_class="folder",
is_framework=False,
is_resource=True,
is_variant=False,
is_directory=True
)
for item in self.file_reference_list:
if re.search(item.pattern, filename):
#logging.info("MATCH: %s -> %s" % (filename, item.file_class))
return item
# physical directories should be treated as directory references
if absolute_path:
if os.path.isdir(absolute_path):
return directory_reference
logging.warn("WARNING: Could not determine FileReference class for \"%s\"!" % filename)
return default_text
def get_product_data(self, product_type):
PRODUCT_DATA = {
ProductType.Invalid : None,
ProductType.Application : {
"file_type": "wrapper.application",
"extension": "app",
"type": "com.apple.product-type.application"
},
ProductType.Commandline : {
"file_type": "compiled.mach-o.executable",
"extension": "",
"type": "com.apple.product-type.tool"
},
ProductType.DynamicLibrary : {
"file_type": "compiled.mach-o.dylib",
"extension": "dylib",
"type": "com.apple.product-type.library.dynamic"
},
ProductType.StaticLibrary : {
"file_type": "archive.ar",
"extension": "a",
"type": "com.apple.product-type.library.static"
},
ProductType.MacOSX_Bundle : {
"file_type": "wrapper.cfbundle",
"extension": "bundle",
"type": "com.apple.product-type.bundle"
},
ProductType.MacOSX_Framework : {
"file_type": "wrapper.framework",
"extension": "framework",
"type": "com.apple.product-type.framework"
}
}
if product_type not in PRODUCT_DATA.keys():
raise Exception("Unsupported product_type: %s" % product_type)
return PRODUCT_DATA[product_type]
def get_bundle_name(self, product):
product_data = self.get_product_data(product.output)
explicit_file_type = product_data["file_type"]
extension = (".%s" % product_data["extension"]) \
if product_data["extension"] else ""
return ("%s%s" % (product.name, extension))
def create_group(self, **kwargs):
name = kwargs.get("name", "Unnamed")
path = kwargs.get("path", None)
children = kwargs.get("children", [])
source_tree = kwargs.get("source_tree", RELATIVE_TO_GROUP)
properties = [
IdValue("isa", "PBXGroup"),
ListValue("children", children),
StringValue("name", name),
StringValue("sourceTree", source_tree)
]
if path:
properties.append(StringValue("path", path))
return ArrayKeyValue(PBXReference(comment=name), properties)
def build(self, **kwargs):
executor = kwargs.get("executor", None)
buildfile = kwargs.get("buildfile", None)
filemanager = kwargs.get("filemanager", None)
target_platform = kwargs.get("target_platform", None)
context_list = kwargs.get("context_list", None)
for context in context_list:
architectures, configurations = buildfile.get_layout_params(context.product)
try:
params = context.params_list[0]
except:
logging.error("Failed to get the first LayoutParams from ProductContext!")
raise
#for architecture in architectures:
for configuration in configurations:
#arch = XcodeArchitectureMap(architecture)
cd = ChangeDirectory(path=context.abs_root)
cd.run()
# If build for the iPhone simulator;
# simply pass SDKROOT=iphonesimulator
#logging.info("BUILD PRODUCT: %s, [archs=%s, config:%s]" % (context.product.name, architectures, configuration))
project_name = os.path.join(context.base_to_project_relative, context.product.name)
commandline = self.generate_xcodebuild_commandline(
action="build",
project_name=project_name,
configuration_name=configuration,
architectures=architectures,
product=context.product
)
executor.execute(commandline=commandline)
if target_platform.matches("iphoneos"):
# run another build command for iphonesimulator
commandline = self.generate_xcodebuild_commandline(
action="build",
project_name=os.path.join(context.base_to_project_relative,context.product.name),
configuration_name=configuration,
architectures=None,
product=context.product,
sdk="iphonesimulator%s" % params.driver.sdkroot_version
)
executor.execute(commandline=commandline)
cd.pop()
return None
def clean(self, **kwargs):
executor = kwargs.get("executor", None)
return None
def generate_xcodebuild_commandline(self, **kwargs):
action = kwargs.get("action", None)
project_name = kwargs.get("project_name", None)
workspace_name = kwargs.get("workspace_name", None)
configuration_name = kwargs.get("configuration_name", None)
target_name = kwargs.get("target_name", None)
alltargets_flag = kwargs.get("alltargets", None)
architectures = kwargs.get("architectures", None)
product = kwargs.get("product", None)
sdk = kwargs.get("sdk", None)
scheme_name = "%s - %s" % (product.name, configuration_name)
vars = []
if project_name:
vars.append("-project \"%s.xcodeproj\"" % project_name)
if workspace_name:
vars.append("-workspace \"%s.xcworkspace\"" % workspace_name)
#if configuration_name:
# vars.append("-configuration \"%s\"" % configuration_name)
if configuration_name:
vars.append("-scheme \"%s\"" % scheme_name)
if alltargets_flag:
vars.append("-alltargets")
if architectures:
# if more than one architecture is given, provide the full list as VALID_ARCHS;
# otherwise name the single architecture explicitly so only it is built.
if type(architectures) is list and len(architectures) > 1:
archs = [str(XcodeArchitectureMap(architecture)) for architecture in architectures]
vars.append("VALID_ARCHS=\"%s\"" % " ".join(archs))
else:
architecture = str(XcodeArchitectureMap(architectures[0]))
vars.append("-arch %s ONLY_VALID_ARCHS=NO VALID_ARCHS=%s" % (architecture, architecture))
if sdk:
vars.append("-sdk %s" % sdk)
return "xcodebuild %s" % (" ".join(vars))
def get_project_name(self, product):
return "%s.xcodeproj" % product.name
|
apetrone/pegasus
|
drivers/xcode.py
|
Python
|
bsd-2-clause
| 48,957
|
[
"VisIt"
] |
ccc0e09358354dbdbd0d425fc1782bd54e2aa3b007a2eebf6c86fddf3edc704b
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Database Processing/Find Possible Duplicate People"""
#-------------------------------------------------------------------------
#
# GNOME libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE
from gramps.gen.lib import Event, Person
from gramps.gui.utils import ProgressMeter
from gramps.gui.plug import tool
from gramps.gen.soundex import soundex, compare
from gramps.gen.display.name import displayer as name_displayer
from gramps.gui.dialog import OkDialog
from gramps.gui.listmodel import ListModel
from gramps.gen.errors import WindowActiveError
from gramps.gui.merge import MergePerson
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.dialog import RunDatabaseRepair
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_val2label = {
0.25 : _("Low"),
1.0 : _("Medium"),
2.0 : _("High"),
}
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Find_Possible_Duplicate_People')
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
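# is_initial: one-character capitalized names and two-character forms like "J." are
# treated as initials (truthy); anything longer, e.g. "John", is not.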
def is_initial(name):
if len(name) > 2:
return 0
elif len(name) == 2:
if name[0] == name[0].upper() and name[1] == '.':
return 1
else:
return name[0] == name[0].upper()
#-------------------------------------------------------------------------
#
# The Actual tool.
#
#-------------------------------------------------------------------------
class DuplicatePeopleTool(tool.Tool, ManagedWindow):
def __init__(self, dbstate, user, options_class, name, callback=None):
uistate = user.uistate
tool.Tool.__init__(self, dbstate, options_class, name)
ManagedWindow.__init__(self, uistate, [],
self.__class__)
self.dbstate = dbstate
self.uistate = uistate
self.map = {}
self.list = []
self.index = 0
self.merger = None
self.mergee = None
self.removed = {}
self.update = callback
self.use_soundex = 1
top = Glade(toplevel="finddupes", also_load=["liststore1"])
# retrieve options
threshold = self.options.handler.options_dict['threshold']
use_soundex = self.options.handler.options_dict['soundex']
my_menu = Gtk.ListStore(str, object)
for val in sorted(_val2label):
my_menu.append([_val2label[val], val])
self.soundex_obj = top.get_object("soundex")
self.soundex_obj.set_active(use_soundex)
self.soundex_obj.show()
self.menu = top.get_object("menu")
self.menu.set_model(my_menu)
self.menu.set_active(0)
window = top.toplevel
self.set_window(window, top.get_object('title'),
_('Find Possible Duplicate People'))
self.setup_configs('interface.duplicatepeopletool', 350, 220)
top.connect_signals({
"on_do_merge_clicked" : self.__dummy,
"on_help_show_clicked" : self.__dummy,
"on_delete_show_event" : self.__dummy,
"on_merge_ok_clicked" : self.on_merge_ok_clicked,
"destroy_passed_object" : self.close,
"on_help_clicked" : self.on_help_clicked,
"on_delete_merge_event" : self.close,
"on_delete_event" : self.close,
})
self.show()
def build_menu_names(self, obj):
return (_("Tool settings"),_("Find Duplicates tool"))
def on_help_clicked(self, obj):
"""Display the relevant portion of Gramps manual"""
display_help(WIKI_HELP_PAGE , WIKI_HELP_SEC)
def ancestors_of(self, p1_id, id_list):
if (not p1_id) or (p1_id in id_list):
return
id_list.append(p1_id)
p1 = self.db.get_person_from_handle(p1_id)
f1_id = p1.get_main_parents_family_handle()
if f1_id:
f1 = self.db.get_family_from_handle(f1_id)
self.ancestors_of(f1.get_father_handle(),id_list)
self.ancestors_of(f1.get_mother_handle(),id_list)
def on_merge_ok_clicked(self, obj):
threshold = self.menu.get_model()[self.menu.get_active()][1]
self.use_soundex = int(self.soundex_obj.get_active())
try:
self.find_potentials(threshold)
except AttributeError as msg:
RunDatabaseRepair(str(msg), parent=self.window)
return
self.options.handler.options_dict['threshold'] = threshold
self.options.handler.options_dict['soundex'] = self.use_soundex
# Save options
self.options.handler.save_options()
if len(self.map) == 0:
OkDialog(
_("No matches found"),
_("No potential duplicate people were found"),
parent=self.window)
else:
try:
DuplicatePeopleToolMatches(self.dbstate, self.uistate,
self.track, self.list, self.map,
self.update)
except WindowActiveError:
pass
def find_potentials(self, thresh):
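# Two passes: first bucket every person by gender and surname key, then compare
# each person only against others in the same bucket, recording candidate pairs
# that score at or above the threshold in self.map.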
self.progress = ProgressMeter(_('Find Duplicates'),
_('Looking for duplicate people'),
parent=self.window)
index = 0
males = {}
females = {}
length = self.db.get_number_of_people()
self.progress.set_pass(_('Pass 1: Building preliminary lists'),
length)
for p1_id in self.db.iter_person_handles():
self.progress.step()
p1 = self.db.get_person_from_handle(p1_id)
key = self.gen_key(get_surnames(p1.get_primary_name()))
if p1.get_gender() == Person.MALE:
if key in males:
males[key].append(p1_id)
else:
males[key] = [p1_id]
else:
if key in females:
females[key].append(p1_id)
else:
females[key] = [p1_id]
self.progress.set_pass(_('Pass 2: Calculating potential matches'),
length)
for p1key in self.db.iter_person_handles():
self.progress.step()
p1 = self.db.get_person_from_handle(p1key)
key = self.gen_key(get_surnames(p1.get_primary_name()))
if p1.get_gender() == Person.MALE:
remaining = males[key]
else:
remaining = females[key]
#index = 0
for p2key in remaining:
#index += 1
if p1key == p2key:
continue
p2 = self.db.get_person_from_handle(p2key)
if p2key in self.map:
(v,c) = self.map[p2key]
if v == p1key:
continue
chance = self.compare_people(p1,p2)
if chance >= thresh:
if p1key in self.map:
val = self.map[p1key]
if val[1] > chance:
self.map[p1key] = (p2key,chance)
else:
self.map[p1key] = (p2key,chance)
self.list = sorted(self.map)
self.length = len(self.list)
self.progress.close()
def gen_key(self, val):
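# Surname grouping key: the SoundEx code of the surname when the soundex option
# is enabled, falling back to the raw string for names that cannot be encoded.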
if self.use_soundex:
try:
return soundex(val)
except UnicodeEncodeError:
return val
else:
return val
def compare_people(self, p1, p2):
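# Accumulate a similarity score from names, birth/death dates and places, and
# parents' and spouses' names; return -1 as soon as any check rules the pair out
# (including when one person is an ancestor of the other).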
name1 = p1.get_primary_name()
name2 = p2.get_primary_name()
chance = self.name_match(name1, name2)
if chance == -1 :
return -1
birth1_ref = p1.get_birth_ref()
if birth1_ref:
birth1 = self.db.get_event_from_handle(birth1_ref.ref)
else:
birth1 = Event()
death1_ref = p1.get_death_ref()
if death1_ref:
death1 = self.db.get_event_from_handle(death1_ref.ref)
else:
death1 = Event()
birth2_ref = p2.get_birth_ref()
if birth2_ref:
birth2 = self.db.get_event_from_handle(birth2_ref.ref)
else:
birth2 = Event()
death2_ref = p2.get_death_ref()
if death2_ref:
death2 = self.db.get_event_from_handle(death2_ref.ref)
else:
death2 = Event()
value = self.date_match(birth1.get_date_object(),
birth2.get_date_object())
if value == -1 :
return -1
chance += value
value = self.date_match(death1.get_date_object(),
death2.get_date_object())
if value == -1 :
return -1
chance += value
value = self.place_match(birth1.get_place_handle(),
birth2.get_place_handle())
if value == -1 :
return -1
chance += value
value = self.place_match(death1.get_place_handle(),
death2.get_place_handle())
if value == -1 :
return -1
chance += value
ancestors = []
self.ancestors_of(p1.get_handle(),ancestors)
if p2.get_handle() in ancestors:
return -1
ancestors = []
self.ancestors_of(p2.get_handle(),ancestors)
if p1.get_handle() in ancestors:
return -1
f1_id = p1.get_main_parents_family_handle()
f2_id = p2.get_main_parents_family_handle()
if f1_id and f2_id:
f1 = self.db.get_family_from_handle(f1_id)
f2 = self.db.get_family_from_handle(f2_id)
dad1_id = f1.get_father_handle()
if dad1_id:
dad1 = get_name_obj(self.db.get_person_from_handle(dad1_id))
else:
dad1 = None
dad2_id = f2.get_father_handle()
if dad2_id:
dad2 = get_name_obj(self.db.get_person_from_handle(dad2_id))
else:
dad2 = None
value = self.name_match(dad1,dad2)
if value == -1:
return -1
chance += value
mom1_id = f1.get_mother_handle()
if mom1_id:
mom1 = get_name_obj(self.db.get_person_from_handle(mom1_id))
else:
mom1 = None
mom2_id = f2.get_mother_handle()
if mom2_id:
mom2 = get_name_obj(self.db.get_person_from_handle(mom2_id))
else:
mom2 = None
value = self.name_match(mom1,mom2)
if value == -1:
return -1
chance += value
for f1_id in p1.get_family_handle_list():
f1 = self.db.get_family_from_handle(f1_id)
for f2_id in p2.get_family_handle_list():
f2 = self.db.get_family_from_handle(f2_id)
if p1.get_gender() == Person.FEMALE:
father1_id = f1.get_father_handle()
father2_id = f2.get_father_handle()
if father1_id and father2_id:
if father1_id == father2_id:
chance += 1
else:
father1 = self.db.get_person_from_handle(father1_id)
father2 = self.db.get_person_from_handle(father2_id)
fname1 = get_name_obj(father1)
fname2 = get_name_obj(father2)
value = self.name_match(fname1,fname2)
if value != -1:
chance += value
else:
mother1_id = f1.get_mother_handle()
mother2_id = f2.get_mother_handle()
if mother1_id and mother2_id:
if mother1_id == mother2_id:
chance += 1
else:
mother1 = self.db.get_person_from_handle(mother1_id)
mother2 = self.db.get_person_from_handle(mother2_id)
mname1 = get_name_obj(mother1)
mname2 = get_name_obj(mother2)
value = self.name_match(mname1,mname2)
if value != -1:
chance += value
return chance
def name_compare(self, s1, s2):
if self.use_soundex:
try:
return compare(s1,s2)
except UnicodeEncodeError:
return s1 == s2
else:
return s1 == s2
def date_match(self, date1, date2):
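# Date similarity: 0 if either date is empty, 1 for an exact match, a range
# comparison (0.5 or -1) for compound dates, 0.75 for the same year with a
# matching or missing month, and -1 otherwise.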
if date1.is_empty() or date2.is_empty():
return 0
if date1.is_equal(date2):
return 1
if date1.is_compound() or date2.is_compound():
return self.range_compare(date1,date2)
if date1.get_year() == date2.get_year():
if date1.get_month() == date2.get_month():
return 0.75
if not date1.get_month_valid() or not date2.get_month_valid():
return 0.75
else:
return -1
else:
return -1
def range_compare(self, date1, date2):
start_date_1 = date1.get_start_date()[0:3]
start_date_2 = date2.get_start_date()[0:3]
stop_date_1 = date1.get_stop_date()[0:3]
stop_date_2 = date2.get_stop_date()[0:3]
if date1.is_compound() and date2.is_compound():
if (start_date_2 <= start_date_1 <= stop_date_2 or
start_date_1 <= start_date_2 <= stop_date_1 or
start_date_2 <= stop_date_1 <= stop_date_2 or
start_date_1 <= stop_date_2 <= stop_date_1):
return 0.5
else:
return -1
elif date2.is_compound():
if start_date_2 <= start_date_1 <= stop_date_2:
return 0.5
else:
return -1
else:
if start_date_1 <= start_date_2 <= stop_date_1:
return 0.5
else:
return -1
def name_match(self, name, name1):
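# Names match only if the surnames compare equal and the suffixes do not conflict;
# identical given names score 1, otherwise the shorter given-name list is scored
# word by word against the longer one.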
if not name1 or not name:
return 0
srn1 = get_surnames(name)
sfx1 = name.get_suffix()
srn2 = get_surnames(name1)
sfx2 = name1.get_suffix()
if not self.name_compare(srn1,srn2):
return -1
if sfx1 != sfx2:
if sfx1 != "" and sfx2 != "":
return -1
if name.get_first_name() == name1.get_first_name():
return 1
else:
list1 = name.get_first_name().split()
list2 = name1.get_first_name().split()
if len(list1) < len(list2):
return self.list_reduce(list1,list2)
else:
return self.list_reduce(list2,list1)
def place_match(self, p1_id, p2_id):
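# Compare place titles word by word: identical handles or titles score 1, each
# shared word adds 0.5 and each near match (same first letter, equal under the
# configured name comparison) adds 0.25, capped at 1; 0 when only one place is
# set, -1 when nothing matches.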
if p1_id == p2_id:
return 1
if not p1_id:
name1 = ""
else:
p1 = self.db.get_place_from_handle(p1_id)
name1 = p1.get_title()
if not p2_id:
name2 = ""
else:
p2 = self.db.get_place_from_handle(p2_id)
name2 = p2.get_title()
if not (name1 and name2):
return 0
if name1 == name2:
return 1
list1 = name1.replace(","," ").split()
list2 = name2.replace(","," ").split()
value = 0
for name in list1:
for name2 in list2:
if name == name2:
value += 0.5
elif name[0] == name2[0] and self.name_compare(name, name2):
value += 0.25
return min(value,1) if value else -1
def list_reduce(self, list1, list2):
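# Score two given-name lists against each other: matching initials and near
# matches add 0.25, identical words add 0.5; the result is capped at 1, or -1
# when nothing lines up.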
value = 0
for name in list1:
for name2 in list2:
if is_initial(name) and name[0] == name2[0]:
value += 0.25
elif is_initial(name2) and name2[0] == name[0]:
value += 0.25
elif name == name2:
value += 0.5
elif name[0] == name2[0] and self.name_compare(name, name2):
value += 0.25
return min(value,1) if value else -1
def __dummy(self, obj):
"""dummy callback, needed because a shared glade file is used for
both toplevel windows and all signals must be handled.
"""
pass
class DuplicatePeopleToolMatches(ManagedWindow):
def __init__(self, dbstate, uistate, track, the_list, the_map, callback):
ManagedWindow.__init__(self,uistate,track,self.__class__)
self.dellist = set()
self.list = the_list
self.map = the_map
self.length = len(self.list)
self.update = callback
self.db = dbstate.db
self.dbstate = dbstate
self.uistate = uistate
top = Glade(toplevel="mergelist")
window = top.toplevel
self.set_window(window, top.get_object('title'),
_('Potential Merges'))
self.setup_configs('interface.duplicatepeopletoolmatches', 500, 350)
self.mlist = top.get_object("mlist")
top.connect_signals({
"destroy_passed_object" : self.close,
"on_do_merge_clicked" : self.on_do_merge_clicked,
"on_help_show_clicked" : self.on_help_clicked,
"on_delete_show_event" : self.close,
"on_merge_ok_clicked" : self.__dummy,
"on_help_clicked" : self.__dummy,
"on_delete_merge_event" : self.__dummy,
"on_delete_event" : self.__dummy,
})
self.db.connect("person-delete", self.person_delete)
mtitles = [
(_('Rating'),3,75),
(_('First Person'),1,200),
(_('Second Person'),2,200),
('',-1,0)
]
self.list = ListModel(self.mlist,mtitles,
event_func=self.on_do_merge_clicked)
self.redraw()
self.show()
def build_menu_names(self, obj):
return (_("Merge candidates"), _("Merge persons"))
def on_help_clicked(self, obj):
"""Display the relevant portion of Gramps manual"""
display_help(WIKI_HELP_PAGE , WIKI_HELP_SEC)
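    # Rebuild the candidate list, skipping pairs in which either person has
    # already been merged away or deleted.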
def redraw(self):
list = []
for p1key, p1data in self.map.items():
if p1key in self.dellist:
continue
(p2key,c) = p1data
if p2key in self.dellist:
continue
if p1key == p2key:
continue
list.append((c,p1key,p2key))
self.list.clear()
for (c,p1key,p2key) in list:
c1 = "%5.2f" % c
c2 = "%5.2f" % (100-c)
p1 = self.db.get_person_from_handle(p1key)
p2 = self.db.get_person_from_handle(p2key)
if not p1 or not p2:
continue
pn1 = name_displayer.display(p1)
pn2 = name_displayer.display(p2)
self.list.add([c1, pn1, pn2,c2],(p1key,p2key))
def on_do_merge_clicked(self, obj):
store,iter = self.list.selection.get_selected()
if not iter:
return
(self.p1,self.p2) = self.list.get_object(iter)
MergePerson(self.dbstate, self.uistate, self.track, self.p1, self.p2,
self.on_update, True)
def on_update(self):
if self.db.has_person_handle(self.p1):
titanic = self.p2
else:
titanic = self.p1
self.dellist.add(titanic)
self.update()
self.redraw()
def update_and_destroy(self, obj):
self.update(1)
self.close()
def person_delete(self, handle_list):
""" deal with person deletes outside of the tool """
self.dellist.update(handle_list)
self.redraw()
def __dummy(self, obj):
"""dummy callback, needed because a shared glade file is used for
both toplevel windows and all signals must be handled.
"""
pass
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def name_of(p):
if not p:
return ""
return "%s (%s)" % (name_displayer.display(p),p.get_handle())
def get_name_obj(person):
if person:
return person.get_primary_name()
else:
return None
def get_surnames(name):
"""Construct a full surname of the surnames"""
return ' '.join([surn.get_surname() for surn in name.get_surname_list()])
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class DuplicatePeopleToolOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name,person_id=None):
tool.ToolOptions.__init__(self, name,person_id)
# Options specific for this report
self.options_dict = {
'soundex' : 1,
'threshold' : 0.25,
}
self.options_help = {
'soundex' : ("=0/1","Whether to use SoundEx codes",
["Do not use SoundEx","Use SoundEx"],
True),
'threshold' : ("=num","Threshold for tolerance",
"Floating point number")
}
|
dermoth/gramps
|
gramps/plugins/tool/finddupes.py
|
Python
|
gpl-2.0
| 23,285
|
[
"Brian"
] |
ede533ce8fbe87942d9fd6dc415733fcf025a4efe5675d260c8f985b0a5b9a3f
|
#!/usr/bin/python
import httplib
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
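# Build an authorized YouTube Data API client: load stored OAuth 2.0
# credentials from the per-script oauth2.json storage file, or run the
# browser flow if they are missing or invalid.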
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=YOUTUBE_UPLOAD_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
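# Assemble the videos.insert request body (snippet and status) from the
# command-line options and start a resumable upload.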
def initialize_upload(youtube, options):
tags = None
if options.keywords:
tags = options.keywords.split(",")
body=dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status=dict(
privacyStatus=options.privacyStatus
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
)
resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
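# Each retry sleeps a random interval in [0, 2**retry) seconds and the upload
# is abandoned after MAX_RETRIES failed attempts.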
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if response is not None:
if 'id' in response:
print "Video id '%s' was successfully uploaded." % response['id']
print response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
if __name__ == '__main__':
argparser.add_argument("--file", required=True, help="Video file to upload")
argparser.add_argument("--title", help="Video title", default="Test Title")
argparser.add_argument("--description", help="Video description",
default="Test Description")
argparser.add_argument("--category", default="22",
help="Numeric video category. " +
"See https://developers.google.com/youtube/v3/docs/videoCategories/list")
argparser.add_argument("--keywords", help="Video keywords, comma separated",
default="")
argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
default=VALID_PRIVACY_STATUSES[0], help="Video privacy status.")
args = argparser.parse_args()
if not os.path.exists(args.file):
exit("Please specify a valid file using the --file= parameter.")
youtube = get_authenticated_service(args)
try:
initialize_upload(youtube, args)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
|
deathmetalland/IkaLog
|
upload_video.py
|
Python
|
apache-2.0
| 6,729
|
[
"VisIt"
] |
a7f33c858113e1527a7e5ce9c9828ee5ae7228a86f73cdc8c193f16f9c1e7988
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
class Migration(migrations.Migration):
dependencies = [
('visit', '0070_visit_siblings_under_5_present'),
]
operations = [
migrations.AlterModelOptions(
name='participanttype',
options={'ordering': ('index',)},
),
migrations.AddField(
model_name='participanttype',
name='index',
field=models.PositiveSmallIntegerField(default=10),
),
migrations.AddField(
model_name='participanttype',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='participanttype',
name='is_custom',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='participanttype',
name='key',
field=models.UUIDField(default=uuid.uuid4, null=True),
),
]
|
koebbe/homeworks
|
visit/migrations/0071_auto_20150826_2053.py
|
Python
|
mit
| 1,078
|
[
"VisIt"
] |
5d94886c91c2cf49a9308870feb6902bb10062412fa745d655c36a9eee9165e7
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from functools import reduce
import numpy as np
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
from typing import Generic, Iterable, List, Optional, Tuple, Type, TypeVar, cast, TYPE_CHECKING
from pyspark.context import SparkContext
from pyspark.mllib.linalg import Vector
from pyspark.rdd import RDD
from pyspark.sql.dataframe import DataFrame
T = TypeVar("T")
L = TypeVar("L", bound="Loader")
JL = TypeVar("JL", bound="JavaLoader")
if TYPE_CHECKING:
from pyspark.mllib._typing import VectorLike
from py4j.java_gateway import JavaObject
from pyspark.mllib.regression import LabeledPoint
class MLUtils:
"""
Helper methods to load, save and pre-process data used in MLlib.
.. versionadded:: 1.0.0
"""
@staticmethod
def _parse_libsvm_line(line: str) -> Tuple[float, np.ndarray, np.ndarray]:
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in range(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values
@staticmethod
def _convert_labeled_point_to_libsvm(p: "LabeledPoint") -> str:
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in range(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in range(len(v)):
items.append(str(i + 1) + ":" + str(v[i])) # type: ignore[index]
return " ".join(items)
@staticmethod
def loadLibSVMFile(
sc: SparkContext, path: str, numFeatures: int = -1, minPartitions: Optional[int] = None
) -> RDD["LabeledPoint"]:
"""
Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
.. versionadded:: 1.0.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context
path : str
file or directory path in any Hadoop-supported file system URI
numFeatures : int, optional
number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
            features may not be present in certain files,
which leads to inconsistent feature
dimensions.
minPartitions : int, optional
min number of partitions
Returns
-------
:py:class:`pyspark.RDD`
labeled data stored as an RDD of LabeledPoint
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
"""
from pyspark.mllib.regression import LabeledPoint
lines = sc.textFile(path, minPartitions)
parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
if numFeatures <= 0:
parsed.cache()
numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
return parsed.map(
lambda x: LabeledPoint(
x[0], Vectors.sparse(numFeatures, x[1], x[2]) # type: ignore[arg-type]
)
)
@staticmethod
def saveAsLibSVMFile(data: RDD["LabeledPoint"], dir: str) -> None:
"""
Save labeled data in LIBSVM format.
.. versionadded:: 1.0.0
Parameters
----------
data : :py:class:`pyspark.RDD`
an RDD of LabeledPoint to be saved
dir : str
directory to save the data
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir)
@staticmethod
def loadLabeledPoints(
sc: SparkContext, path: str, minPartitions: Optional[int] = None
) -> RDD["LabeledPoint"]:
"""
Load labeled points saved using RDD.saveAsTextFile.
.. versionadded:: 1.0.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context
path : str
file or directory path in any Hadoop-supported file system URI
minPartitions : int, optional
min number of partitions
Returns
-------
:py:class:`pyspark.RDD`
labeled data stored as an RDD of LabeledPoint
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
@staticmethod
@since("1.5.0")
def appendBias(data: Vector) -> Vector:
"""
Returns a new vector with `1.0` (bias) appended to
the end of the input vector.
"""
vec = _convert_to_vector(data)
if isinstance(vec, SparseVector):
newIndices = np.append(vec.indices, len(vec))
newValues = np.append(vec.values, 1.0)
return SparseVector(len(vec) + 1, newIndices, newValues)
else:
return _convert_to_vector(np.append(vec.toArray(), 1.0))
@staticmethod
@since("1.5.0")
def loadVectors(sc: SparkContext, path: str) -> RDD[Vector]:
"""
Loads vectors saved using `RDD[Vector].saveAsTextFile`
with the default number of partitions.
"""
return callMLlibFunc("loadVectors", sc, path)
@staticmethod
def convertVectorColumnsToML(dataset: DataFrame, *cols: str) -> DataFrame:
"""
Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
            vector columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with old vector columns converted to the
new vector type
Examples
--------
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
@staticmethod
def convertVectorColumnsFromML(dataset: DataFrame, *cols: str) -> DataFrame:
"""
Converts vector columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Vector` type from the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Vector columns to be converted.
Old vector columns will be ignored. If unspecified, all new
vector columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with new vector columns converted to the
old vector type
Examples
--------
>>> import pyspark
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
@staticmethod
def convertMatrixColumnsToML(dataset: DataFrame, *cols: str) -> DataFrame:
"""
Converts matrix columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Matrix` type to the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Matrix columns to be converted.
New matrix columns will be ignored. If unspecified, all old
            matrix columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with old matrix columns converted to the
new matrix type
Examples
--------
>>> import pyspark
>>> from pyspark.mllib.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
@staticmethod
def convertMatrixColumnsFromML(dataset: DataFrame, *cols: str) -> DataFrame:
"""
Converts matrix columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Matrix` type from the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
.. versionadded:: 2.0.0
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
input dataset
\\*cols : str
Matrix columns to be converted.
Old matrix columns will be ignored. If unspecified, all new
matrix columns will be converted except nested ones.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
the input dataset with new matrix columns converted to the
old matrix type
Examples
--------
>>> import pyspark
>>> from pyspark.ml.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable:
"""
Mixin for models and transformers which may be saved as files.
.. versionadded:: 1.3.0
"""
def save(self, sc: SparkContext, path: str) -> None:
"""
Save this model to the given path.
This saves:
* human-readable (JSON) model metadata to path/metadata/
* Parquet formatted data to path/data/
The model may be loaded using :py:meth:`Loader.load`.
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context used to save model data.
path : str
Path specifying the directory in which to save
this model. If the directory already exists,
this method throws an exception.
"""
raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
"""
Mixin for models that provide save() through their Scala
implementation.
.. versionadded:: 1.3.0
"""
_java_model: "JavaObject"
@since("1.3.0")
def save(self, sc: SparkContext, path: str) -> None:
"""Save this model to the given path."""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, str):
raise TypeError("path should be a string, got type %s" % type(path))
self._java_model.save(sc._jsc.sc(), path)
class Loader(Generic[T]):
"""
Mixin for classes which can load saved models from files.
.. versionadded:: 1.3.0
"""
@classmethod
def load(cls: Type[L], sc: SparkContext, path: str) -> L:
"""
Load a model from the given path. The model should have been
saved using :py:meth:`Saveable.save`.
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
Spark context used for loading model files.
path : str
Path specifying the directory to which the model was saved.
Returns
-------
object
model instance
"""
raise NotImplementedError
@inherit_doc
class JavaLoader(Loader[T]):
"""
Mixin for classes which can load saved models using its Scala
implementation.
.. versionadded:: 1.3.0
"""
@classmethod
def _java_loader_class(cls) -> str:
"""
Returns the full class name of the Java loader. The default
implementation replaces "pyspark" by "org.apache.spark" in
the Python full class name.
"""
java_package = cls.__module__.replace("pyspark", "org.apache.spark")
return ".".join([java_package, cls.__name__])
@classmethod
def _load_java(cls, sc: SparkContext, path: str) -> "JavaObject":
"""
Load a Java model from the given path.
"""
java_class = cls._java_loader_class()
java_obj: "JavaObject" = reduce(getattr, java_class.split("."), sc._jvm)
return java_obj.load(sc._jsc.sc(), path)
@classmethod
@since("1.3.0")
def load(cls: Type[JL], sc: SparkContext, path: str) -> JL:
"""Load a model from the given path."""
java_model = cls._load_java(sc, path)
return cls(java_model) # type: ignore[call-arg]
class LinearDataGenerator:
"""Utils for generating linear data.
.. versionadded:: 1.5.0
"""
@staticmethod
def generateLinearInput(
intercept: float,
weights: "VectorLike",
xMean: "VectorLike",
xVariance: "VectorLike",
nPoints: int,
seed: int,
eps: float,
) -> List["LabeledPoint"]:
"""
.. versionadded:: 1.5.0
Parameters
----------
intercept : float
bias factor, the term c in X'w + c
weights : :py:class:`pyspark.mllib.linalg.Vector` or convertible
feature vector, the term w in X'w + c
xMean : :py:class:`pyspark.mllib.linalg.Vector` or convertible
Point around which the data X is centered.
xVariance : :py:class:`pyspark.mllib.linalg.Vector` or convertible
Variance of the given data
nPoints : int
Number of points to be generated
seed : int
Random Seed
eps : float
Used to scale the noise. If eps is set high,
the amount of gaussian noise added is more.
Returns
-------
list
            of :py:class:`pyspark.mllib.regression.LabeledPoint` of length nPoints
"""
weights = [float(weight) for weight in cast(Iterable[float], weights)]
xMean = [float(mean) for mean in cast(Iterable[float], xMean)]
xVariance = [float(var) for var in cast(Iterable[float], xVariance)]
return list(
callMLlibFunc(
"generateLinearInputWrapper",
float(intercept),
weights,
xMean,
xVariance,
int(nPoints),
int(seed),
float(eps),
)
)
@staticmethod
@since("1.5.0")
def generateLinearRDD(
sc: SparkContext,
nexamples: int,
nfeatures: int,
eps: float,
nParts: int = 2,
intercept: float = 0.0,
) -> RDD["LabeledPoint"]:
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper",
sc,
int(nexamples),
int(nfeatures),
float(eps),
int(nParts),
float(intercept),
)
def _test() -> None:
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder.master("local[2]").appName("mllib.util tests").getOrCreate()
globs["spark"] = spark
globs["sc"] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
ueshin/apache-spark
|
python/pyspark/mllib/util.py
|
Python
|
apache-2.0
| 22,859
|
[
"Gaussian"
] |
59c2905bfc23972eaab48693b4902ac9060162055f9c315f7be8fcaff8fe1cf1
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2010 Borys Jurgiel
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import QgsApplication, QgsContextHelp
import sys, time
from ui_qgsplugininstallerfetchingbase import Ui_QgsPluginInstallerFetchingDialogBase
from ui_qgsplugininstallerinstallingbase import Ui_QgsPluginInstallerInstallingDialogBase
from ui_qgsplugininstallerrepositorybase import Ui_QgsPluginInstallerRepositoryDetailsDialogBase
from ui_qgsplugininstallerpluginerrorbase import Ui_QgsPluginInstallerPluginErrorDialogBase
from ui_qgsplugininstallerbase import Ui_QgsPluginInstallerDialogBase
from installer_data import *
try:
from qgis.utils import startPlugin, unloadPlugin, loadPlugin # QGIS >= 1.4
from qgis.utils import reloadPlugin, updateAvailablePlugins # QGIS >= 1.5
except Exception:
pass
# --- common functions ------------------------------------------------------------------- #
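# Remove a plugin directory tree (or just the link if the path is a symlink).
# Returns an error message on failure (or when there is nothing to remove) and
# an empty string on success; recreates the user plugin directory if it was
# removed along the way.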
def removeDir(path):
result = QString()
if not QFile(path).exists():
result = QCoreApplication.translate("QgsPluginInstaller","Nothing to remove! Plugin directory doesn't exist:")+"\n"+path
elif QFile(path).remove(): # if it is only link, just remove it without resolving.
pass
else:
fltr = QDir.Dirs | QDir.Files | QDir.Hidden
iterator = QDirIterator(path, fltr, QDirIterator.Subdirectories)
while iterator.hasNext():
item = iterator.next()
if QFile(item).remove():
pass
fltr = QDir.Dirs | QDir.Hidden
iterator = QDirIterator(path, fltr, QDirIterator.Subdirectories)
while iterator.hasNext():
item = iterator.next()
if QDir().rmpath(item):
pass
if QFile(path).exists():
result = QCoreApplication.translate("QgsPluginInstaller","Failed to remove the directory:")+"\n"+path+"\n"+QCoreApplication.translate("QgsPluginInstaller","Check permissions or remove it manually")
# restore plugin directory if removed by QDir().rmpath()
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins"
if not QDir(pluginDir).exists():
QDir().mkpath(pluginDir)
return result
# --- /common functions ------------------------------------------------------------------ #
# --- class QgsPluginInstallerFetchingDialog --------------------------------------------------------------- #
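# Progress dialog shown while repository metadata is fetched; keeps one tree
# row and one progress share per enabled repository, summed into the overall
# progress bar.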
class QgsPluginInstallerFetchingDialog(QDialog, Ui_QgsPluginInstallerFetchingDialogBase):
# ----------------------------------------- #
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setupUi(self)
self.progressBar.setRange(0,len(repositories.allEnabled())*100)
self.itemProgress = {}
self.item = {}
for key in repositories.allEnabled():
self.item[key] = QTreeWidgetItem(self.treeWidget)
self.item[key].setText(0,key)
if repositories.all()[key]["state"] > 1:
self.itemProgress[key] = 100
self.displayState(key,0)
else:
self.itemProgress[key] = 0
self.displayState(key,2)
self.treeWidget.resizeColumnToContents(0)
QObject.connect(repositories, SIGNAL("repositoryFetched(QString)"), self.repositoryFetched)
QObject.connect(repositories, SIGNAL("anythingChanged(QString, int, int)"), self.displayState)
# ----------------------------------------- #
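  # Update the status column for one repository; 'state' indexes the message
  # list and 'state2', when given, is a download percentage folded into the
  # overall progress bar.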
def displayState(self,key,state,state2=None):
messages=[self.tr("Success"),self.tr("Resolving host name..."),self.tr("Connecting..."),self.tr("Host connected. Sending request..."),self.tr("Downloading data..."),self.tr("Idle"),self.tr("Closing connection..."),self.tr("Error")]
message = messages[state]
if state2:
message += " (%s%%)" % state2
self.item[key].setText(1,message)
if state == 4 and state2:
self.itemProgress[key] = state2
totalProgress = sum(self.itemProgress.values())
self.progressBar.setValue(totalProgress)
# ----------------------------------------- #
def repositoryFetched(self, repoName):
self.itemProgress[repoName] = 100
if repositories.all()[repoName]["state"] == 2:
self.displayState(repoName,0)
else:
self.displayState(repoName,7)
if not repositories.fetchingInProgress():
self.close()
# --- /class QgsPluginInstallerFetchingDialog -------------------------------------------------------------- #
# --- class QgsPluginInstallerRepositoryDialog ------------------------------------------------------------- #
class QgsPluginInstallerRepositoryDialog(QDialog, Ui_QgsPluginInstallerRepositoryDetailsDialogBase):
# ----------------------------------------- #
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setupUi(self)
self.editURL.setText("http://")
self.connect(self.editName, SIGNAL("textChanged(const QString &)"), self.textChanged)
self.connect(self.editURL, SIGNAL("textChanged(const QString &)"), self.textChanged)
self.textChanged(None)
# ----------------------------------------- #
def textChanged(self, string):
enable = (self.editName.text().count() > 0 and self.editURL.text().count() > 0)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enable)
# --- /class QgsPluginInstallerRepositoryDialog ------------------------------------------------------------ #
# --- class QgsPluginInstallerInstallingDialog --------------------------------------------------------------- #
class QgsPluginInstallerInstallingDialog(QDialog, Ui_QgsPluginInstallerInstallingDialogBase):
# ----------------------------------------- #
def __init__(self, parent, plugin):
QDialog.__init__(self, parent)
self.setupUi(self)
self.plugin = plugin
self.mResult = QString()
self.progressBar.setRange(0,0)
self.progressBar.setFormat(QString("%p%"))
self.labelName.setText(QString(plugin["name"]))
self.connect(self.buttonBox, SIGNAL("clicked(QAbstractButton*)"), self.abort)
url = QUrl(plugin["url"])
path = QString(url.toPercentEncoding(url.path(), "!$&'()*+,;=:/@"))
fileName = plugin["filename"]
tmpDir = QDir.tempPath()
tmpPath = QDir.cleanPath(tmpDir+"/"+fileName)
self.file = QFile(tmpPath)
port = url.port()
if port < 0:
port = 80
self.http = QPHttp(url.host(), port)
self.connect(self.http, SIGNAL("stateChanged ( int )"), self.stateChanged)
self.connect(self.http, SIGNAL("dataReadProgress ( int , int )"), self.readProgress)
self.connect(self.http, SIGNAL("requestFinished (int, bool)"), self.requestFinished)
self.httpGetId = self.http.get(path, self.file)
# ----------------------------------------- #
def result(self):
return self.mResult
# ----------------------------------------- #
def stateChanged(self, state):
messages=[self.tr("Installing..."),self.tr("Resolving host name..."),self.tr("Connecting..."),self.tr("Host connected. Sending request..."),self.tr("Downloading data..."),self.tr("Idle"),self.tr("Closing connection..."),self.tr("Error")]
self.labelState.setText(messages[state])
# ----------------------------------------- #
def readProgress(self, done, total):
self.progressBar.setMaximum(total)
self.progressBar.setValue(done)
# ----------------------------------------- #
def requestFinished(self, requestId, state):
if requestId != self.httpGetId:
return
self.buttonBox.setEnabled(False)
if state:
self.mResult = self.http.errorString()
self.reject()
return
self.file.close()
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins"
tmpPath = self.file.fileName()
# make sure that the parent directory exists
if not QDir(pluginDir).exists():
QDir().mkpath(pluginDir)
# if the target directory already exists as a link, remove the link without resolving:
QFile(pluginDir+QString(QDir.separator())+self.plugin["localdir"]).remove()
try:
un = unzip()
un.extract(unicode(tmpPath), unicode(pluginDir)) # test extract. If fails, then exception will be raised and no removing occurs
# removing old plugin files if exist
removeDir(QDir.cleanPath(pluginDir+"/"+self.plugin["localdir"])) # remove old plugin if exists
un.extract(unicode(tmpPath), unicode(pluginDir)) # final extract.
except:
self.mResult = self.tr("Failed to unzip the plugin package. Probably it's broken or missing from the repository. You may also want to make sure that you have write permission to the plugin directory:") + "\n" + pluginDir
self.reject()
return
try:
# cleaning: removing the temporary zip file
QFile(tmpPath).remove()
except:
pass
self.close()
# ----------------------------------------- #
def abort(self):
self.http.abort()
self.mResult = self.tr("Aborted by user")
self.reject()
# --- /class QgsPluginInstallerInstallingDialog ------------------------------------------------------------- #
# --- class QgsPluginInstallerPluginErrorDialog -------------------------------------------------------------- #
class QgsPluginInstallerPluginErrorDialog(QDialog, Ui_QgsPluginInstallerPluginErrorDialogBase):
# ----------------------------------------- #
def __init__(self, parent, errorMessage):
QDialog.__init__(self, parent)
self.setupUi(self)
if not errorMessage:
errorMessage = self.tr("no error message received")
self.textBrowser.setText(errorMessage)
# --- /class QgsPluginInstallerPluginErrorDialog ------------------------------------------------------------- #
# --- class QgsPluginInstallerDialog ------------------------------------------------------------------------- #
class QgsPluginInstallerDialog(QDialog, Ui_QgsPluginInstallerDialogBase):
# ----------------------------------------- #
def __init__(self, parent, fl):
QDialog.__init__(self, parent, fl)
self.setupUi(self)
self.reposGroup = "/Qgis/plugin-repos"
self.connect(self.lineFilter, SIGNAL("textChanged (QString)"), self.filterChanged)
self.connect(self.comboFilter1, SIGNAL("currentIndexChanged (int)"), self.filterChanged)
self.connect(self.comboFilter2, SIGNAL("currentIndexChanged (int)"), self.filterChanged)
# grab clicks on trees
self.connect(self.treePlugins, SIGNAL("itemSelectionChanged()"), self.pluginTreeClicked)
self.connect(self.treeRepositories, SIGNAL("itemSelectionChanged()"), self.repositoryTreeClicked)
# buttons
self.connect(self.buttonUpgradeAll, SIGNAL("clicked()"), self.upgradeAllClicked)
self.connect(self.buttonInstall, SIGNAL("clicked()"), self.installPluginClicked)
self.connect(self.buttonUninstall, SIGNAL("clicked()"), self.uninstallPluginClicked)
self.buttonInstall.setEnabled(False)
self.buttonUninstall.setEnabled(False)
self.buttonHelp.setEnabled(QGIS_14)
self.connect(self.buttonHelp, SIGNAL("clicked()"), self.runHelp)
# repositories handling
self.connect(self.treeRepositories, SIGNAL("doubleClicked(QModelIndex)"), self.editRepository)
self.connect(self.buttonFetchRepositories, SIGNAL("clicked()"), self.addKnownRepositories)
self.connect(self.buttonAddRep, SIGNAL("clicked()"), self.addRepository)
self.connect(self.buttonEditRep, SIGNAL("clicked()"), self.editRepository)
self.connect(self.buttonDeleteRep, SIGNAL("clicked()"), self.deleteRepository)
self.buttonEditRep.setEnabled(False)
self.buttonDeleteRep.setEnabled(False)
# configuration widgets
self.connect(self.checkUpdates, SIGNAL("toggled (bool)"), self.changeCheckingPolicy)
self.connect(self.comboInterval, SIGNAL("currentIndexChanged (int)"), self.changeCheckingInterval)
self.connect(self.radioPluginType0, SIGNAL("toggled (bool)"), self.changePluginPolicy)
self.connect(self.radioPluginType1, SIGNAL("toggled (bool)"), self.changePluginPolicy)
self.connect(self.radioPluginType2, SIGNAL("toggled (bool)"), self.changePluginPolicy)
if repositories.checkingOnStart():
self.checkUpdates.setChecked(Qt.Checked)
else:
self.checkUpdates.setChecked(Qt.Unchecked)
interval = repositories.checkingOnStartInterval()
intervals = [0,1,3,7,14,30] # days
if intervals.count(interval):
index = intervals.index(interval)
else:
index = 1
self.comboInterval.setCurrentIndex(index)
self.populateMostWidgets()
# ----------------------------------------- #
def getAllAvailablePlugins(self):
""" fetch plugins from all repositories """
repositories.load()
plugins.getAllInstalled()
for key in repositories.allEnabled():
repositories.requestFetching(key)
if repositories.fetchingInProgress():
self.fetchDlg = QgsPluginInstallerFetchingDialog(self)
self.fetchDlg.exec_()
del self.fetchDlg
for key in repositories.all():
repositories.killConnection(key)
    # display an error message for every unavailable repository, unless Shift is pressed or all enabled repositories are unavailable
keepQuiet = QgsApplication.keyboardModifiers() == Qt.KeyboardModifiers(Qt.ShiftModifier)
if repositories.allUnavailable() and repositories.allUnavailable() != repositories.allEnabled():
for key in repositories.allUnavailable():
if not keepQuiet:
QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Error reading repository:") + " " + key + "\n" + repositories.all()[key]["error"])
if QgsApplication.keyboardModifiers() == Qt.KeyboardModifiers(Qt.ShiftModifier):
keepQuiet = True
# ----------------------------------------- #
def populateMostWidgets(self):
self.comboFilter1.clear()
self.comboFilter1.addItem(self.tr("all repositories"))
self.treeRepositories.clear()
for key in repositories.all():
a = QTreeWidgetItem(self.treeRepositories)
a.setText(1,key)
a.setText(2,repositories.all()[key]["url"])
if repositories.all()[key]["enabled"] and repositories.all()[key]["valid"]:
if repositories.all()[key]["state"] == 2:
a.setText(0,self.tr("connected"))
a.setIcon(0,QIcon(":/plugins/installer/repoConnected.png"))
a.setToolTip(0,self.tr("This repository is connected"))
self.comboFilter1.addItem(key)
else:
a.setText(0,self.tr("unavailable"))
a.setIcon(0,QIcon(":/plugins/installer/repoUnavailable.png"))
a.setToolTip(0,self.tr("This repository is enabled, but unavailable"))
self.comboFilter1.addItem(key)
else:
a.setText(0,self.tr("disabled"))
a.setIcon(0,QIcon(":/plugins/installer/repoDisabled.png"))
if repositories.all()[key]["valid"]:
a.setToolTip(0,self.tr("This repository is disabled"))
else:
a.setToolTip(0,self.tr("This repository is blocked due to incompatibility with your Quantum GIS version"))
for i in [0,1,2]:
a.setForeground(i,QBrush(QColor(Qt.gray)))
for i in [0,1,2]:
self.treeRepositories.resizeColumnToContents(i)
self.comboFilter1.addItem(self.tr("orphans"))
# fill the status filter comboBox
self.comboFilter2.clear()
self.comboFilter2.addItem(self.tr("any status"))
self.comboFilter2.addItem(self.tr("not installed", "plural"))
self.comboFilter2.addItem(self.tr("installed", "plural"))
if plugins.isThereAnythingNew():
self.comboFilter2.addItem(self.tr("upgradeable and news"))
#set configuration widgets (dependent on the repository list)
if len(repositories.all()) == 1:
self.radioPluginType0.setEnabled(False)
self.radioPluginType1.setEnabled(False)
self.radioPluginType2.setEnabled(False)
else:
self.radioPluginType0.setEnabled(True)
self.radioPluginType1.setEnabled(True)
self.radioPluginType2.setEnabled(True)
settings = QSettings()
(i, ok) = settings.value(settingsGroup+"/allowedPluginType", QVariant(2)).toInt()
if i == 1 or len(repositories.all()) == 1:
self.radioPluginType0.setChecked(Qt.Checked)
elif i == 3:
self.radioPluginType2.setChecked(Qt.Checked)
else:
self.radioPluginType1.setChecked(Qt.Checked)
# ----------------------------------------- #
def filterChanged(self,i):
""" one of the filter widgets has been changed """
self.populatePluginTree()
# ----------------------------------------- #
def filterCheck(self,plugin):
""" the filter for the pluginsTree """
if self.comboFilter1.currentIndex() != 0 and self.comboFilter1.currentText() != self.tr("orphans"):
if self.comboFilter1.currentText() != plugin["repository"]:
return False
elif self.comboFilter1.currentText() == self.tr("orphans"):
if plugin["status"] != "orphan":
return False
if self.comboFilter2.currentIndex() == 1 and not plugin["status"] in ["not installed","new"]:
return False
if self.comboFilter2.currentIndex() == 2 and not plugin["status"] in ["installed","upgradeable","newer","orphan"]:
return False
if self.comboFilter2.currentIndex() == 3 and not plugin["status"] in ["upgradeable","new"]:
return False
if self.lineFilter.text() == "":
return True
else:
for i in ["name","version_inst","version_avail","desc_repo","desc_local","author","status","repository"]:
item = QString(plugin[i])
if item != None:
if item.contains(self.lineFilter.text(), Qt.CaseInsensitive):
return True
return False
# ----------------------------------------- #
def populatePluginTree(self):
""" fill up the pluginTree """
statusTips={"not installed" : self.tr("This plugin is not installed"),
"installed" : self.tr("This plugin is installed"),
"upgradeable" : self.tr("This plugin is installed, but there is an updated version available"),
"orphan" : self.tr("This plugin is installed, but I can't find it in any enabled repository"),
"new" : self.tr("This plugin is not installed and is seen for the first time"),
"newer" : self.tr("This plugin is installed and is newer than its version available in a repository"),
"incompatible" : self.tr("This plugin is incompatible with your Quantum GIS version and probably won't work."),
"dependent" : self.tr("The required Python module is not installed.\nFor more information, please visit its homepage and Quantum GIS wiki."),
"broken" : self.tr("This plugin seems to be broken.\nIt has been installed but can't be loaded.\nHere is the error message:")}
statuses ={"not installed" : self.tr("not installed", "singular"),
"installed" : self.tr("installed", "singular"),
"upgradeable" : self.tr("upgradeable", "singular"),
"orphan" : self.tr("installed", "singular"),
"new" : self.tr("new!", "singular"),
"newer" : self.tr("installed", "singular"),
"incompatible" : self.tr("invalid", "singular"),
"dependent" : self.tr("invalid", "singular"),
"broken" : self.tr("invalid", "singular")}
orderInvalid = ["incompatible","broken","dependent"]
orderValid = ["upgradeable","new","not installed","installed","orphan","newer"]
def addItem(p):
if self.filterCheck(p):
statusTip = statusTips[p["status"]]
if p["read-only"]:
          statusTip = statusTip + "\n" + self.tr("Note that this is a core plugin and cannot be uninstalled")
installedVersion = p["version_inst"]
if not installedVersion:
installedVersion = "?"
availableVersion = p["version_avail"]
if not availableVersion:
availableVersion = "?"
if p["status"] == "upgradeable":
ver = installedVersion + " -> " + availableVersion
elif p["status"] == "newer":
ver = installedVersion + " (" + availableVersion + ")"
elif p["status"] in ["not installed", "new"]:
ver = availableVersion
else:
ver = installedVersion
if p["status"] in ["upgradeable","newer"] or p["error"]:
verTip = self.tr("installed version") + ": " + installedVersion + "\n" + self.tr("available version") + ": " + availableVersion
elif p["status"] in ["not installed", "new"]:
verTip = self.tr("available version") + ": " + availableVersion
elif p["status"] == "installed":
verTip = self.tr("installed version") + ": " + installedVersion + "\n" + self.tr("That's the newest available version")
elif p["status"] == "orphan":
verTip = self.tr("installed version") + ": " + installedVersion + "\n" + self.tr("There is no version available for download")
else:
verTip = ""
if p["error"] == "broken":
desc = self.tr("This plugin is broken")
descTip = statusTips[p["error"]] + "\n" + p["error_details"]
statusTip = descTip
elif p["error"] == "incompatible":
desc = self.tr("This plugin requires a newer version of Quantum GIS") + " (" + self.tr("at least")+ " " + p["error_details"] + ")"
descTip = statusTips[p["error"]]
statusTip = descTip
elif p["error"] == "dependent":
desc = self.tr("This plugin requires a missing module") + " (" + p["error_details"] + ")"
descTip = statusTips[p["error"]]
statusTip = descTip
else:
desc = p["desc_local"]
descTip = p["desc_repo"]
if not desc:
desc = descTip
if not p["repository"]:
repository = self.tr("only locally available")
else:
repository = p["repository"]
a = QTreeWidgetItem(self.treePlugins)
if p["error"]:
a.setText(0,statuses[p["error"]])
else:
a.setText(0,statuses[p["status"]])
a.setToolTip(0,statusTip)
a.setText(1,p["name"])
a.setText(2,ver)
a.setToolTip(2,verTip)
a.setText(3,desc)
a.setToolTip(3,descTip)
a.setText(4,p["author"])
if p["homepage"]:
a.setToolTip(4,p["homepage"])
else:
a.setToolTip(4,"")
a.setText(5,repository)
a.setToolTip(5,p["url"])
# set fonts and colors
for i in [0,1,2,3,4,5]:
if p["error"]:
a.setForeground(i,QBrush(QColor(Qt.red)))
if p["status"] in ["new","upgradeable"] or p["error"]:
font = QFont()
font.setWeight(QFont.Bold)
a.setFont(i,font)
# -------- #
if not plugins.all():
return
self.treePlugins.clear()
for i in orderInvalid:
for p in plugins.all().values():
if p["error"] == i:
addItem(p)
n = 0 # displayed plugins count
self.upgradeablePlugins = [] # list of plugins able to update
for i in orderValid:
for p in plugins.all().values():
if p["status"] == i and not p["error"]:
addItem(p)
if p["status"] == "upgradeable": self.upgradeablePlugins += [p["localdir"]]
n +=1
self.setWindowTitle(self.tr("QGIS Python Plugin Installer") + self.tr(" - %d plugins available" % len(plugins.all())))
self.buttonUpgradeAll.setEnabled( len(self.upgradeablePlugins) )
# resize the columns
for i in [0,1,2,3,4,5]:
self.treePlugins.resizeColumnToContents(i)
for i in [0,1,2,4,5]:
if self.treePlugins.columnWidth(i) > 260:
self.treePlugins.setColumnWidth(i, 260)
if self.treePlugins.columnWidth(3) > 560:
self.treePlugins.setColumnWidth(3, 560)
# initially, keep order of inserting
self.treePlugins.sortItems(100,Qt.AscendingOrder)
# ----------------------------------------- #
def pluginTreeClicked(self):
""" the pluginsTree has been clicked """
buttons={"not installed":(True,False,self.tr("Install plugin")),
"installed":(True,True,self.tr("Reinstall plugin")),
"upgradeable":(True,True,self.tr("Upgrade plugin")),
"orphan":(False,True,self.tr("Install/upgrade plugin")),
"new":(True,False,self.tr("Install plugin")),
"newer":(True,True,self.tr("Downgrade plugin"))}
self.buttonInstall.setEnabled(False)
self.buttonInstall.setText(self.tr("Install/upgrade plugin"))
self.buttonUninstall.setEnabled(False)
if not self.treePlugins.selectedItems():
return
item = self.treePlugins.currentItem()
if not item:
return
key = plugins.keyByUrl(item.toolTip(5))
if not key:
return
plugin = plugins.all()[key]
if not plugin:
return
self.buttonInstall.setEnabled(buttons[plugin["status"]][0])
self.buttonUninstall.setEnabled(buttons[plugin["status"]][1])
self.buttonInstall.setText(buttons[plugin["status"]][2])
if plugin["read-only"]:
self.buttonUninstall.setEnabled(False)
# ----------------------------------------- #
def upgradeAllClicked(self):
for key in self.upgradeablePlugins:
self.installPlugin(key, quiet=True)
# ----------------------------------------- #
def installPluginClicked(self):
if not self.treePlugins.currentItem():
return
key = plugins.keyByUrl(self.treePlugins.currentItem().toolTip(5))
self.installPlugin(key)
# ----------------------------------------- #
def uninstallPluginClicked(self):
if not self.treePlugins.currentItem():
return
key = plugins.keyByUrl(self.treePlugins.currentItem().toolTip(5))
self.uninstallPlugin(key)
# ----------------------------------------- #
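  # Download and unpack the plugin 'key', (re)load it when possible and report
  # the outcome; 'quiet' suppresses the success message (used by Upgrade All).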
def installPlugin(self, key, quiet=False):
""" install currently selected plugin """
infoString = ('','')
plugin = plugins.all()[key]
previousStatus = plugin["status"]
if not plugin:
return
if plugin["status"] == "newer" and not plugin["error"]: # ask for confirmation if user downgrades an usable plugin
if QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Are you sure you want to downgrade the plugin to the latest available version? The installed one is newer!"), QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:
return
dlg = QgsPluginInstallerInstallingDialog(self,plugin)
dlg.exec_()
if dlg.result():
infoString = (self.tr("Plugin installation failed"), dlg.result())
elif not QDir(QDir.cleanPath(QgsApplication.qgisSettingsDirPath() + "/python/plugins/" + key)).exists():
infoString = (self.tr("Plugin has disappeared"), self.tr("The plugin seems to have been installed but I don't know where. Probably the plugin package contained a wrong named directory.\nPlease search the list of installed plugins. I'm nearly sure you'll find the plugin there, but I just can't determine which of them it is. It also means that I won't be able to determine if this plugin is installed and inform you about available updates. However the plugin may work. Please contact the plugin author and submit this issue."))
QApplication.setOverrideCursor(Qt.WaitCursor)
plugins.getAllInstalled()
plugins.rebuild()
QApplication.restoreOverrideCursor()
else:
if QGIS_14:
if QGIS_15: # update the list of plugins in plugin handling routines
updateAvailablePlugins()
# try to load the plugin
loadPlugin(plugin["localdir"])
else: # QGIS < 1.4
try:
exec ("sys.path_importer_cache.clear()")
exec ("import %s" % plugin["localdir"])
exec ("reload (%s)" % plugin["localdir"])
except:
pass
plugins.getAllInstalled()
plugins.rebuild()
plugin = plugins.all()[key]
if not plugin["error"]:
if previousStatus in ["not installed", "new"]:
if QGIS_14: # plugins can be started in python from QGIS >= 1.4
infoString = (self.tr("Plugin installed successfully"), self.tr("Plugin installed successfully"))
settings = QSettings()
settings.setValue("/PythonPlugins/"+plugin["localdir"], QVariant(True))
startPlugin(plugin["localdir"])
else: infoString = (self.tr("Plugin installed successfully"), self.tr("Python plugin installed.\nNow you need to enable it in Plugin Manager."))
else:
if QGIS_15: # plugins can be reloaded on the fly in QGIS >= 1.5
settings = QSettings()
if key != 'plugin_installer' and settings.value("/PythonPlugins/"+key).toBool(): # plugin will be reloaded on the fly only if currently loaded
infoString = (self.tr("Plugin reinstalled successfully"), self.tr("Plugin reinstalled successfully"))
reloadPlugin(key)
else:
infoString = (self.tr("Plugin reinstalled successfully"), self.tr("Python plugin reinstalled.\nYou need to restart Quantum GIS in order to reload it."))
else: infoString = (self.tr("Plugin reinstalled successfully"), self.tr("Python plugin reinstalled.\nYou need to restart Quantum GIS in order to reload it."))
if quiet:
infoString = (None, None)
else:
if plugin["error"] == "incompatible":
message = self.tr("The plugin is designed for a newer version of Quantum GIS. The minimum required version is:")
message += " <b>" + plugin["error_details"] + "</b>"
elif plugin["error"] == "dependent":
message = self.tr("The plugin depends on some components missing on your system. You need to install the following Python module in order to enable it:")
message += "<b> " + plugin["error_details"] + "</b>"
else:
message = self.tr("The plugin is broken. Python said:")
message += "<br><b>" + plugin["error_details"] + "</b>"
dlg = QgsPluginInstallerPluginErrorDialog(self,message)
dlg.exec_()
if dlg.result():
# revert installation
plugins.getAllInstalled()
plugins.rebuild()
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins/" + plugin["localdir"]
          result = removeDir(pluginDir)
if QDir(pluginDir).exists():
infoString = (self.tr("Plugin uninstall failed"), result)
try:
exec ("sys.path_importer_cache.clear()")
exec ("import %s" % plugin["localdir"])
exec ("reload (%s)" % plugin["localdir"])
except:
pass
else:
try:
exec ("del sys.modules[%s]" % plugin["localdir"])
except:
pass
plugins.getAllInstalled()
plugins.rebuild()
if plugins.all().has_key(key) and not plugins.all()[key]["status"] in ["not installed", "new"]:
if previousStatus in ["not installed", "new"]:
history.markChange(key,'A')
else:
history.markChange(key,'R')
self.populatePluginTree()
if infoString[0]:
QMessageBox.information(self, infoString[0], infoString[1])
# ----------------------------------------- #
def uninstallPlugin(self,key):
""" uninstall currently selected plugin """
plugin = plugins.all()[key]
if not plugin:
return
warning = self.tr("Are you sure you want to uninstall the following plugin?") + "\n(" + plugin["name"] + ")"
if plugin["status"] == "orphan" and not plugin["error"]:
warning += "\n\n"+self.tr("Warning: this plugin isn't available in any accessible repository!")
if QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), warning , QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:
return
# unload the plugin if it's not plugin_installer itself (otherwise, do it after removing its directory):
if key != "plugin_installer":
try:
unloadPlugin(key)
except:
pass
pluginDir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins/" + plugin["localdir"]
result = removeDir(pluginDir)
if result:
QMessageBox.warning(self, self.tr("Plugin uninstall failed"), result)
else:
# if the uninstalled plugin is the installer itself, reload it and quit
if key == "plugin_installer":
if QGIS_15:
try:
QMessageBox.information(self, self.tr("QGIS Python Plugin Installer"), self.tr("Plugin Installer update uninstalled. Plugin Installer will now close and revert to its primary version. You can find it in the Plugins menu and continue operation."))
reloadPlugin(key)
return
except:
pass
else:
QMessageBox.information(self, self.tr("QGIS Python Plugin Installer"), self.tr("Plugin Installer update uninstalled. Please restart QGIS in order to load its primary version."))
# safe remove
try:
unloadPlugin(plugin["localdir"])
except:
pass
try:
exec ("plugins[%s].unload()" % plugin["localdir"])
exec ("del plugins[%s]" % plugin["localdir"])
except:
pass
try:
exec ("del sys.modules[%s]" % plugin["localdir"])
except:
pass
plugins.getAllInstalled()
plugins.rebuild()
self.populatePluginTree()
if QGIS_14: QMessageBox.information(self, self.tr("Plugin uninstalled successfully"), self.tr("Plugin uninstalled successfully"))
else: QMessageBox.information(self, self.tr("Plugin uninstalled successfully"), self.tr("Python plugin uninstalled. Note that you may need to restart Quantum GIS in order to remove it completely."))
history.markChange(key,'D')
# ----------------------------------------- #
def repositoryTreeClicked(self):
""" the repositoryTree has been clicked """
if self.treeRepositories.selectedItems():
self.buttonEditRep.setEnabled(True)
self.buttonDeleteRep.setEnabled(True)
else:
self.buttonEditRep.setEnabled(False)
self.buttonDeleteRep.setEnabled(False)
# ----------------------------------------- #
def changeCheckingPolicy(self,policy):
""" the Checking On Start checkbox has been clicked """
if policy:
repositories.setCheckingOnStart(True)
else:
repositories.setCheckingOnStart(False)
# ----------------------------------------- #
def changeCheckingInterval(self,interval):
""" the Checking on start interval combobox has been clicked """
intervals = [0,1,3,7,14,30]
repositories.setCheckingOnStartInterval(intervals[interval])
# ----------------------------------------- #
def changePluginPolicy(self, state):
""" one of the plugin type radiobuttons has been clicked """
if not state: # radio button released
return
if self.radioPluginType0.isChecked():
i = 1
elif self.radioPluginType1.isChecked():
i = 2
else:
i = 3
settings = QSettings()
settings.setValue(settingsGroup+"/allowedPluginType", QVariant(i))
plugins.rebuild()
self.populatePluginTree()
# ----------------------------------------- #
def addKnownRepositories(self):
""" update list of known repositories - in the future it will be replaced with an online fetching """
message = self.tr("You are about to add several plugin repositories that are neither authorized nor supported by the Quantum GIS team. Plugin authors generally make efforts to ensure that their work is useful and safe, however, we can assume no responsibility for them.")
if QMessageBox.question(self, self.tr("QGIS Python Plugin Installer"), message, QMessageBox.Ok, QMessageBox.Abort) == QMessageBox.Ok:
repositories.addKnownRepos()
# refresh lists and populate widgets
QApplication.setOverrideCursor(Qt.WaitCursor)
self.getAllAvailablePlugins()
plugins.rebuild()
self.populateMostWidgets()
self.populatePluginTree()
QApplication.restoreOverrideCursor()
# ----------------------------------------- #
def addRepository(self):
""" add repository button has been clicked """
dlg = QgsPluginInstallerRepositoryDialog(self)
dlg.checkBoxEnabled.setCheckState(Qt.Checked)
if not dlg.exec_():
return
for i in repositories.all().values():
if dlg.editURL.text().trimmed() == i["url"]:
QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Unable to add another repository with the same URL!"))
return
settings = QSettings()
settings.beginGroup(self.reposGroup)
reposName = dlg.editName.text()
reposURL = dlg.editURL.text().trimmed()
if repositories.all().has_key(reposName):
reposName = reposName + "(2)"
# add to settings
settings.setValue(reposName+"/url", QVariant(reposURL))
settings.setValue(reposName+"/enabled", QVariant(bool(dlg.checkBoxEnabled.checkState())))
# refresh lists and populate widgets
QApplication.setOverrideCursor(Qt.WaitCursor)
plugins.removeRepository(reposName)
self.getAllAvailablePlugins()
plugins.rebuild()
self.populateMostWidgets()
self.populatePluginTree()
QApplication.restoreOverrideCursor()
# ----------------------------------------- #
def editRepository(self):
""" edit repository button has been clicked """
checkState={False:Qt.Unchecked,True:Qt.Checked}
current = self.treeRepositories.currentItem()
if current == None:
return
reposName = current.text(1)
dlg = QgsPluginInstallerRepositoryDialog(self)
dlg.editName.setText(reposName)
dlg.editURL.setText(repositories.all()[reposName]["url"])
dlg.checkBoxEnabled.setCheckState(checkState[repositories.all()[reposName]["enabled"]])
if repositories.all()[reposName]["valid"]:
dlg.checkBoxEnabled.setEnabled(True)
dlg.labelInfo.setText("")
else:
dlg.checkBoxEnabled.setEnabled(False)
dlg.labelInfo.setText(self.tr("This repository is blocked due to incompatibility with your Quantum GIS version"))
dlg.labelInfo.setFrameShape(QFrame.Box)
if not dlg.exec_():
return # nothing to do if cancelled
for i in repositories.all().values():
if dlg.editURL.text().trimmed() == i["url"] and dlg.editURL.text().trimmed() != repositories.all()[reposName]["url"]:
QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), self.tr("Unable to add another repository with the same URL!"))
return
# delete old repo from QSettings and create new one
settings = QSettings()
settings.beginGroup(self.reposGroup)
settings.remove(reposName)
newName = dlg.editName.text()
if repositories.all().has_key(newName) and newName != reposName:
newName = newName + "(2)"
settings.setValue(newName+"/url", QVariant(dlg.editURL.text().trimmed()))
settings.setValue(newName+"/enabled", QVariant(bool(dlg.checkBoxEnabled.checkState())))
if dlg.editURL.text().trimmed() == repositories.all()[reposName]["url"] and dlg.checkBoxEnabled.checkState() == checkState[repositories.all()[reposName]["enabled"]]:
repositories.rename(reposName, newName)
self.populateMostWidgets()
return # nothing else to do if only repository name was changed
# refresh lists and populate widgets
QApplication.setOverrideCursor(Qt.WaitCursor)
plugins.removeRepository(reposName)
self.getAllAvailablePlugins()
plugins.rebuild()
self.populateMostWidgets()
self.populatePluginTree()
QApplication.restoreOverrideCursor()
# ----------------------------------------- #
def deleteRepository(self):
""" delete repository button has been clicked """
current = self.treeRepositories.currentItem()
if current == None:
return
warning = self.tr("Are you sure you want to remove the following repository?") + "\n" + current.text(1)
if QMessageBox.warning(self, self.tr("QGIS Python Plugin Installer"), warning , QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:
return
reposName = current.text(1)
# delete from the settings, refresh data and repopulate all the widgets
settings = QSettings()
settings.beginGroup(self.reposGroup)
settings.remove(reposName)
repositories.remove(reposName)
plugins.removeRepository(reposName)
plugins.rebuild()
self.populateMostWidgets()
self.populatePluginTree()
# ----------------------------------------- #
def runHelp(self):
""" open the context help browser """
QgsContextHelp.run("QgsPluginInstallerDialog")
# ----------------------------------------- #
def reject(self):
""" update the list of seen plugins before exit (both 'done' and 'x' buttons emit 'reject' signal) """
plugins.updateSeenPluginsList()
QDialog.reject(self)
# --- /class QgsPluginInstallerDialog ------------------------------------------------------------------------ #
|
mola/qgis
|
python/plugins/plugin_installer/installer_gui.py
|
Python
|
gpl-2.0
| 41,380
|
[
"VisIt"
] |
443762e2a77c0418a3e5352393934848deb21333cc868b2f394e0f863fa437bb
|
#
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2015 Joanna Rutkowska <joanna@invisiblethingslab.com>
# Copyright (C) 2014-2015 Wojtek Porczyk <woju@invisiblethingslab.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
'''Documentation helpers.
This module contains classes and functions which help to maintain documentation,
particularly our custom Sphinx extension.
'''
import argparse
import io
import json
import os
import re
import urllib.error
import urllib.request
import docutils
import docutils.nodes
import docutils.parsers.rst
import docutils.parsers.rst.roles
import docutils.statemachine
import sphinx
import sphinx.errors
import sphinx.locale
import sphinx.util.docfields
import qubes.tools
SUBCOMMANDS_TITLE = 'COMMANDS'
OPTIONS_TITLE = 'OPTIONS'
class GithubTicket(object):
# pylint: disable=too-few-public-methods
def __init__(self, data):
self.number = data['number']
self.summary = data['title']
self.uri = data['html_url']
def fetch_ticket_info(app, number):
    '''Fetch info about a particular GitHub ticket given its number
:param app: Sphinx app object
:param str number: number of the ticket, without #
    :rtype: GithubTicket
:raises: urllib.error.HTTPError
'''
response = urllib.request.urlopen(urllib.request.Request(
app.config.ticket_base_uri.format(number=number),
headers={
'Accept': 'application/vnd.github.v3+json',
'User-agent': __name__}))
return GithubTicket(json.load(response))
def ticket(name, rawtext, text, lineno, inliner, options=None, content=None):
'''Link to qubes ticket
:param str name: The role name used in the document
:param str rawtext: The entire markup snippet, with role
:param str text: The text marked with the role
:param int lineno: The line number where rawtext appears in the input
:param docutils.parsers.rst.states.Inliner inliner: The inliner instance \
that called this function
:param options: Directive options for customisation
:param content: The directive content for customisation
''' # pylint: disable=unused-argument
if options is None:
options = {}
ticketno = text.lstrip('#')
if not ticketno.isdigit():
msg = inliner.reporter.error(
            'Invalid ticket identifier: {!r}'.format(text), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
try:
info = fetch_ticket_info(inliner.document.settings.env.app, ticketno)
except urllib.error.HTTPError as e:
msg = inliner.reporter.error(
'Error while fetching ticket info: {!s}'.format(e), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
docutils.parsers.rst.roles.set_classes(options)
node = docutils.nodes.reference(
rawtext,
'#{} ({})'.format(info.number, info.summary),
refuri=info.uri,
**options)
return [node], []
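# Hedged usage sketch (added for illustration; not part of the upstream
# module): once setup() at the bottom of this file registers the role, an
# .rst document can reference an issue like this:
#
#     See :ticket:`1234` for the original discussion.
#
# The role resolves the number through fetch_ticket_info() and renders a link
# of the form "#1234 (<issue title>)". The issue number is purely hypothetical.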
class versioncheck(docutils.nodes.warning):
# pylint: disable=invalid-name
pass
def visit(self, node):
self.visit_admonition(node, 'version')
def depart(self, node):
self.depart_admonition(node)
sphinx.locale.admonitionlabels['version'] = 'Version mismatch'
class VersionCheck(docutils.parsers.rst.Directive):
'''Directive versioncheck
Check if current version (from ``conf.py``) equals version specified as
argument. If not, generate warning.'''
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
def run(self):
current = self.state.document.settings.env.app.config.version
version = self.arguments[0]
if current == version:
return []
text = ' '.join('''This manual page was written for version **{}**, but
current version at the time when this page was generated is **{}**.
This may or may not mean that page is outdated or has
inconsistencies.'''.format(version, current).split())
node = versioncheck(text)
node['classes'] = ['admonition', 'warning']
self.state.nested_parse(docutils.statemachine.StringList([text]),
self.content_offset, node)
return [node]
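# Hedged usage sketch (illustrative only): the 'versioncheck' directive
# registered in setup() emits a "Version mismatch" admonition when a manual
# page was written for a release other than the one being built, e.g.
#
#     .. versioncheck:: 3.1
#
# where 3.1 stands for a hypothetical documented version.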
def make_rst_section(heading, char):
return '{}\n{}\n\n'.format(heading, char[0] * len(heading))
def prepare_manpage(command):
parser = qubes.tools.get_parser_for_command(command)
stream = io.StringIO()
stream.write('.. program:: {}\n\n'.format(command))
stream.write(make_rst_section(
':program:`{}` -- {}'.format(command, parser.description), '='))
stream.write('''.. warning::
This page was autogenerated from command-line parser. It shouldn't be 1:1
conversion, because it would add little value. Please revise it and add
more descriptive help, which normally won't fit in standard ``--help``
option.
After rewrite, please remove this admonition.\n\n''')
stream.write(make_rst_section('Synopsis', '-'))
usage = ' '.join(parser.format_usage().strip().split())
if usage.startswith('usage: '):
usage = usage[len('usage: '):]
# replace METAVARS with *METAVARS*
usage = re.sub(r'\b([A-Z]{2,})\b', r'*\1*', usage)
stream.write(':command:`{}` {}\n\n'.format(command, usage))
stream.write(make_rst_section('Options', '-'))
for action in parser._actions: # pylint: disable=protected-access
stream.write('.. option:: ')
if action.metavar:
stream.write(', '.join('{}{}{}'.format(
option,
'=' if option.startswith('--') else ' ',
action.metavar)
for option in sorted(action.option_strings)))
else:
stream.write(', '.join(sorted(action.option_strings)))
stream.write('\n\n {}\n\n'.format(action.help))
stream.write(make_rst_section('Authors', '-'))
stream.write('''\
| Joanna Rutkowska <joanna at invisiblethingslab dot com>
| Rafal Wojtczuk <rafal at invisiblethingslab dot com>
| Marek Marczykowski <marmarek at invisiblethingslab dot com>
| Wojtek Porczyk <woju at invisiblethingslab dot com>
.. vim: ts=3 sw=3 et tw=80
''')
return stream.getvalue()
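# Hedged usage sketch (illustrative only; 'qvm-example' is a hypothetical
# command name): the generated reStructuredText skeleton could be written out
# as a manpage source, assuming qubes.tools can resolve a parser for it.
#
#     rst = prepare_manpage('qvm-example')
#     with open('doc/manpages/qvm-example.rst', 'w') as f:
#         f.write(rst)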
class OptionsCheckVisitor(docutils.nodes.SparseNodeVisitor):
''' Checks if the visited option nodes and the specified args are in sync.
'''
def __init__(self, command, args, document):
assert isinstance(args, set)
docutils.nodes.SparseNodeVisitor.__init__(self, document)
self.command = command
self.args = args
def visit_desc(self, node):
''' Skips all but 'option' elements '''
# pylint: disable=no-self-use
if not node.get('desctype', None) == 'option':
raise docutils.nodes.SkipChildren
def visit_desc_name(self, node):
        ''' Checks if the option is defined in `self.args` '''
if not isinstance(node[0], docutils.nodes.Text):
raise sphinx.errors.SphinxError('first child should be Text')
arg = str(node[0])
try:
self.args.remove(arg)
except KeyError:
raise sphinx.errors.SphinxError(
'No such argument for {!r}: {!r}'.format(self.command, arg))
def check_undocumented_arguments(self, ignored_options=None):
''' Call this to check if any undocumented arguments are left.
While the documentation talks about a
'SparseNodeVisitor.depart_document()' function, this function does
            not exist. (For details see implementation of
:py:meth:`NodeVisitor.dispatch_departure()`) So we need to
manually call this.
'''
if ignored_options is None:
ignored_options = set()
left_over_args = self.args - ignored_options
if left_over_args:
raise sphinx.errors.SphinxError(
'Undocumented arguments for command {!r}: {!r}'.format(
self.command, ', '.join(sorted(left_over_args))))
class CommandCheckVisitor(docutils.nodes.SparseNodeVisitor):
''' Checks if the visited sub command section nodes and the specified sub
command args are in sync.
'''
def __init__(self, command, sub_commands, document):
docutils.nodes.SparseNodeVisitor.__init__(self, document)
self.command = command
self.sub_commands = sub_commands
def visit_section(self, node):
        ''' Checks if the visited sub-command section node exists and its
            options are in sync.
Uses :py:class:`OptionsCheckVisitor` for checking
sub-commands options
'''
# pylint: disable=no-self-use
title = str(node[0][0])
if title.upper() == SUBCOMMANDS_TITLE:
return
sub_cmd = self.command + ' ' + title
try:
args = self.sub_commands[title]
options_visitor = OptionsCheckVisitor(sub_cmd, args, self.document)
node.walkabout(options_visitor)
options_visitor.check_undocumented_arguments(
{'--help', '--quiet', '--verbose', '-h', '-q', '-v'})
del self.sub_commands[title]
except KeyError:
raise sphinx.errors.SphinxError(
'No such sub-command {!r}'.format(sub_cmd))
def visit_Text(self, node):
        ''' If the visited text node starts with 'aliases:', all the provided
            comma separated aliases in this node are removed from
            `self.sub_commands`
'''
# pylint: disable=invalid-name
text = str(node).strip()
if text.startswith('aliases:'):
aliases = {a.strip() for a in text.split('aliases:')[1].split(',')}
for alias in aliases:
assert alias in self.sub_commands
del self.sub_commands[alias]
def check_undocumented_sub_commands(self):
''' Call this to check if any undocumented sub_commands are left.
While the documentation talks about a
'SparseNodeVisitor.depart_document()' function, this function does
            not exist. (For details see implementation of
:py:meth:`NodeVisitor.dispatch_departure()`) So we need to
manually call this.
'''
if self.sub_commands:
raise sphinx.errors.SphinxError(
'Undocumented commands for {!r}: {!r}'.format(
self.command, ', '.join(sorted(self.sub_commands.keys()))))
class ManpageCheckVisitor(docutils.nodes.SparseNodeVisitor):
    ''' Checks if the sub-commands and options specified in the 'COMMANDS' and
        'OPTIONS' (case insensitive) sections are in sync with the command parser.
'''
def __init__(self, app, command, document):
docutils.nodes.SparseNodeVisitor.__init__(self, document)
try:
parser = qubes.tools.get_parser_for_command(command)
except ImportError:
app.warn('cannot import module for command')
self.parser = None
return
except AttributeError:
raise sphinx.errors.SphinxError('cannot find parser in module')
self.command = command
self.parser = parser
self.options = set()
self.sub_commands = {}
self.app = app
# pylint: disable=protected-access
for action in parser._actions:
if action.help == argparse.SUPPRESS:
continue
if issubclass(action.__class__,
qubes.tools.AliasedSubParsersAction):
for cmd, cmd_parser in action._name_parser_map.items():
self.sub_commands[cmd] = set()
for sub_action in cmd_parser._actions:
if sub_action.help != argparse.SUPPRESS:
self.sub_commands[cmd].update(
sub_action.option_strings)
else:
self.options.update(action.option_strings)
def visit_section(self, node):
        ''' If section title is OPTIONS or COMMANDS, dispatch the appropriate
`NodeVisitor`.
'''
if self.parser is None:
return
section_title = str(node[0][0]).upper()
if section_title == OPTIONS_TITLE:
options_visitor = OptionsCheckVisitor(self.command, self.options,
self.document)
node.walkabout(options_visitor)
options_visitor.check_undocumented_arguments()
elif section_title == SUBCOMMANDS_TITLE:
sub_cmd_visitor = CommandCheckVisitor(
self.command, self.sub_commands, self.document)
node.walkabout(sub_cmd_visitor)
sub_cmd_visitor.check_undocumented_sub_commands()
def check_man_args(app, doctree, docname):
''' Checks the manpage for undocumented or obsolete sub-commands and
options.
'''
dirname, command = os.path.split(docname)
if os.path.basename(dirname) != 'manpages':
return
app.info('Checking arguments for {!r}'.format(command))
doctree.walk(ManpageCheckVisitor(app, command, doctree))
#
# this is lifted from sphinx' own conf.py
#
event_sig_re = re.compile(r'([a-zA-Z-:<>]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
# pylint: disable=unused-argument
m = event_sig_re.match(sig)
if not m:
signode += sphinx.addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += sphinx.addnodes.desc_name(name, name)
plist = sphinx.addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += sphinx.addnodes.desc_parameter(arg, arg)
signode += plist
return name
#
# end of codelifting
#
def break_to_pdb(app, *dummy):
if not app.config.break_to_pdb:
return
import pdb
pdb.set_trace()
def setup(app):
app.add_role('ticket', ticket)
app.add_config_value(
'ticket_base_uri',
'https://api.github.com/repos/QubesOS/qubes-issues/issues/{number}',
'env')
app.add_config_value('break_to_pdb', False, 'env')
app.add_node(versioncheck,
html=(visit, depart),
man=(visit, depart))
app.add_directive('versioncheck', VersionCheck)
fdesc = sphinx.util.docfields.GroupedField('parameter', label='Parameters',
names=['param'], can_collapse=True)
app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
doc_field_types=[fdesc])
app.connect('doctree-resolved', break_to_pdb)
app.connect('doctree-resolved', check_man_args)
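# Hedged usage sketch (illustrative only): a Sphinx conf.py could enable this
# extension roughly as follows, assuming the package is importable from there.
#
#     extensions = ['qubes.dochelpers']
#     break_to_pdb = False
#     # ticket_base_uri already defaults to the QubesOS issue tracker set above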
# vim: ts=4 sw=4 et
|
kalkin/qubes-core-admin
|
qubes/dochelpers.py
|
Python
|
gpl-2.0
| 15,474
|
[
"VisIt"
] |
04facebf3dac7d6cf6165075e6c7ff1e6115e0dec7fe9a5cb2b43a2b95993a1f
|
#!/usr/bin/env python3
import math
from numbers import Number
import torch
from torch.distributions import constraints
from torch.nn import Module as TModule
from .prior import Prior
class WishartPrior(Prior):
"""Wishart prior over n x n positive definite matrices
    pdf(Sigma) ~ |Sigma|^((nu - n - 1)/2) * exp(-0.5 * Trace(K^-1 Sigma))
    where nu > n - 1 are the degrees of freedom and K > 0 is the n x n scale matrix
Reference: A. Shah, A. G. Wilson, and Z. Ghahramani. Student-t Processes as
Alternatives to Gaussian Processes. ArXiv e-prints, Feb. 2014.
"""
arg_constraints = {"K_inv": constraints.positive_definite, "nu": constraints.positive}
support = constraints.positive_definite
_validate_args = True
def __init__(self, nu, K, validate_args=False):
TModule.__init__(self)
if K.dim() < 2:
raise ValueError("K must be at least 2-dimensional")
n = K.shape[-1]
if K.shape[-2] != K.shape[-1]:
raise ValueError("K must be square")
if isinstance(nu, Number):
nu = torch.tensor(float(nu))
if torch.any(nu <= n):
raise ValueError("Must have nu > n - 1")
self.n = torch.tensor(n, dtype=torch.long, device=nu.device)
batch_shape = nu.shape
event_shape = torch.Size([n, n])
# normalization constant
logdetK = torch.logdet(K)
C = -(nu / 2) * (logdetK + n * math.log(2)) - torch.mvlgamma(nu / 2, n)
K_inv = torch.inverse(K)
# need to assign values before registering as buffers to make argument validation work
self.nu = nu
self.K_inv = K_inv
self.C = C
super(WishartPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args)
# now need to delete to be able to register buffer
del self.nu, self.K_inv, self.C
self.register_buffer("nu", nu)
self.register_buffer("K_inv", K_inv)
self.register_buffer("C", C)
def log_prob(self, X):
# I'm sure this could be done more elegantly
logdetp = torch.logdet(X)
Kinvp = torch.matmul(self.K_inv, X)
trKinvp = torch.diagonal(Kinvp, dim1=-2, dim2=-1).sum(-1)
return self.C + 0.5 * (self.nu - self.n - 1) * logdetp - trKinvp
class InverseWishartPrior(Prior):
"""Inverse Wishart prior over n x n positive definite matrices
    pdf(Sigma) ~ |Sigma|^(-(nu + 2 * n)/2) * exp(-0.5 * Trace(K Sigma^-1))
    where nu > 0 are the degrees of freedom and K > 0 is the n x n scale matrix
Reference: A. Shah, A. G. Wilson, and Z. Ghahramani. Student-t Processes as
Alternatives to Gaussian Processes. ArXiv e-prints, Feb. 2014.
"""
arg_constraints = {"K": constraints.positive_definite, "nu": constraints.positive}
support = constraints.positive_definite
_validate_args = True
def __init__(self, nu, K, validate_args=False):
TModule.__init__(self)
if K.dim() < 2:
raise ValueError("K must be at least 2-dimensional")
n = K.shape[-1]
if isinstance(nu, Number):
nu = torch.tensor(float(nu))
if torch.any(nu <= 0):
raise ValueError("Must have nu > 0")
self.n = torch.tensor(n, dtype=torch.long, device=nu.device)
batch_shape = nu.shape
event_shape = torch.Size([n, n])
# normalization constant
c = (nu + n - 1) / 2
logdetK = torch.logdet(K)
C = c * (logdetK - n * math.log(2)) - torch.mvlgamma(c, n)
# need to assign values before registering as buffers to make argument validation work
self.nu = nu
self.K = K
self.C = C
super(InverseWishartPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args)
# now need to delete to be able to register buffer
del self.nu, self.K, self.C
self.register_buffer("nu", nu)
self.register_buffer("K", K)
self.register_buffer("C", C)
def log_prob(self, X):
logdetp = torch.logdet(X)
pinvK = torch.solve(self.K, X)[0]
trpinvK = torch.diagonal(pinvK, dim1=-2, dim2=-1).sum(-1) # trace in batch mode
return self.C - 0.5 * ((self.nu + 2 * self.n) * logdetp + trpinvK)
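# Hedged usage sketch (added for illustration; not part of the upstream file).
# It shows how the two priors defined above might be instantiated and
# evaluated. The 2 x 2 scale matrix, the degrees of freedom and the test
# matrix are arbitrary example values, and it assumes a torch version where
# torch.solve (used by InverseWishartPrior.log_prob) is still available.
if __name__ == "__main__":
    K_example = torch.eye(2)          # n x n scale matrix
    X_example = 1.5 * torch.eye(2)    # positive definite test matrix
    wishart = WishartPrior(nu=3.0, K=K_example)
    inv_wishart = InverseWishartPrior(nu=3.0, K=K_example)
    print("Wishart log_prob:", wishart.log_prob(X_example))
    print("InverseWishart log_prob:", inv_wishart.log_prob(X_example))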
|
jrg365/gpytorch
|
gpytorch/priors/wishart_prior.py
|
Python
|
mit
| 4,266
|
[
"Gaussian"
] |
5af755cfc081254a9416c03042c22cf7e484e98eed5157e0c7bec81950fb3722
|
# -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
from scipy import interpolate
from scipy.signal import savgol_filter, medfilt
from scipy.ndimage.filters import gaussian_filter1d
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval, SpikesRemovalInteractive
from hyperspy.models.model1d import Model1D
from hyperspy.misc.lowess_smooth import lowess
from hyperspy.misc.utils import is_binned # remove in v2.0
from hyperspy.defaults_parser import preferences
from hyperspy.signal_tools import (
Signal1DCalibration,
SmoothingSavitzkyGolay,
SmoothingLowess,
SmoothingTV,
ButterworthFilter)
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea, _get_background_estimator
from hyperspy._signals.lazy import LazySignal
from hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC, SPIKES_REMOVAL_TOOL_DOCSTRING
from hyperspy.docstrings.signal import (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
from hyperspy.docstrings.plot import (
BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
medfilt_radius=5, maxpeakn=30000, peakgroup=10,
subchannel=True,):
"""Find peaks along a 1D line.
Function to locate the positive peaks in a noisy x-y data set.
Detects peaks by looking for downward zero-crossings in the first
derivative that exceed 'slope_thresh'.
Returns an array containing position, height, and width of each peak.
Sorted by position.
'slope_thresh' and 'amp_thresh', control sensitivity: higher values
will neglect wider peaks (slope) and smaller features (amp),
respectively.
Parameters
----------
y : array
1D input array, e.g. a spectrum
x : array (optional)
1D array describing the calibration of y (must have same shape as y)
slope_thresh : float (optional)
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float (optional)
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10% of max(y).
medfilt_radius : int (optional)
median filter window to apply to smooth the data
(see scipy.signal.medfilt);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int (optional)
number of points around the "top part" of the peak that
are taken to estimate the peak height; for spikes or
very narrow peaks, keep PeakGroup=1 or 2; for broad or
noisy peaks, make PeakGroup larger to reduce the effect
of noise;
default is set to 10.
maxpeakn : int (optional)
number of maximum detectable peaks;
default is set to 30000.
subchannel : bool (optional)
default is set to True.
Returns
-------
P : structured array of shape (npeaks)
contains fields: 'position', 'width', and 'height' for each peak.
Examples
--------
>>> x = np.arange(0,50,0.01)
>>> y = np.cos(x)
>>> peaks = find_peaks_ohaver(y, x, 0, 0)
Notes
-----
Original code from T. C. O'Haver, 1995.
Version 2 Last revised Oct 27, 2006 Converted to Python by
Michael Sarahan, Feb 2011.
Revised to handle edges better. MCS, Mar 2011
"""
if x is None:
x = np.arange(len(y), dtype=np.int64)
if not amp_thresh:
amp_thresh = 0.1 * y.max()
peakgroup = np.round(peakgroup)
if medfilt_radius:
d = np.gradient(medfilt(y, medfilt_radius))
else:
d = np.gradient(y)
n = np.round(peakgroup / 2 + 1)
peak_dt = np.dtype([('position', float),
('height', float),
('width', float)])
P = np.array([], dtype=peak_dt)
peak = 0
for j in range(len(y) - 4):
if np.sign(d[j]) > np.sign(d[j + 1]): # Detects zero-crossing
if np.sign(d[j + 1]) == 0:
continue
# if slope of derivative is larger than slope_thresh
if d[j] - d[j + 1] > slope_thresh:
# if height of peak is larger than amp_thresh
if y[j] > amp_thresh:
# the next section is very slow, and actually messes
# things up for images (discrete pixels),
# so by default, don't do subchannel precision in the
# 1D peakfind step.
if subchannel:
xx = np.zeros(peakgroup)
yy = np.zeros(peakgroup)
s = 0
for k in range(peakgroup):
groupindex = int(j + k - n + 1)
if groupindex < 1:
xx = xx[1:]
yy = yy[1:]
s += 1
continue
elif groupindex > y.shape[0] - 1:
xx = xx[:groupindex - 1]
yy = yy[:groupindex - 1]
break
xx[k - s] = x[groupindex]
yy[k - s] = y[groupindex]
avg = np.average(xx)
stdev = np.std(xx)
xxf = (xx - avg) / stdev
# Fit parabola to log10 of sub-group with
# centering and scaling
yynz = yy != 0
coef = np.polyfit(
xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
c1 = coef[2]
c2 = coef[1]
c3 = coef[0]
with np.errstate(invalid='ignore'):
width = np.linalg.norm(stdev * 2.35703 /
(np.sqrt(2) * np.sqrt(-1 *
c3)))
# if the peak is too narrow for least-squares
# technique to work well, just use the max value
# of y in the sub-group of points near peak.
if peakgroup < 7:
height = np.max(yy)
position = xx[np.argmin(np.abs(yy - height))]
else:
position = - ((stdev * c2 / (2 * c3)) - avg)
height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
# Fill results array P. One row for each peak
# detected, containing the
# peak position (x-value) and peak height (y-value).
else:
position = x[j]
height = y[j]
# no way to know peak width without
# the above measurements.
width = 0
if (not np.isnan(position) and 0 < position < x[-1]):
P = np.hstack((P,
np.array([(position, height, width)],
dtype=peak_dt)))
peak += 1
# return only the part of the array that contains peaks
# (not the whole maxpeakn x 3 array)
if len(P) > maxpeakn:
minh = np.sort(P['height'])[-maxpeakn]
P = P[P['height'] >= minh]
# Sorts the values as a function of position
P.sort(0)
return P
def interpolate1D(number_of_interpolation_points, data):
ip = number_of_interpolation_points
ch = len(data)
old_ax = np.linspace(0, 100, ch)
new_ax = np.linspace(0, 100, ch * ip - (ip - 1))
interpolator = interpolate.interp1d(old_ax, data)
return interpolator(new_ax)
def _estimate_shift1D(data, data_slice=slice(None), ref=None, ip=5,
interpolate=True, mask=None, **kwargs):
if bool(mask):
# asarray is required for consistency as argmax
# returns a numpy scalar array
return np.asarray(np.nan)
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
# Normalise the data before the cross correlation
ref = ref - ref.mean()
data = data - data.mean()
return (np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1).astype(float)
def _shift1D(data, **kwargs):
"""Used to shift a data array by a specified amount in axes units. Axis must
be passed as a kwarg. """
shift = kwargs.get('shift', 0.)
original_axis = kwargs.get('original_axis', None)
fill_value = kwargs.get('fill_value', np.nan)
kind = kwargs.get('kind', 'linear')
if np.isnan(shift) or shift == 0:
return data
    # This is the interpolant function
si = interpolate.interp1d(original_axis, data, bounds_error=False,
fill_value=fill_value, kind=kind)
#Evaluate interpolated data at shifted positions
return si(original_axis-shift)
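# Hedged illustration (not part of the original module): _shift1D interpolates
# the data on its original axis and re-evaluates it at axis - shift, e.g.
#
#     axis = np.linspace(0., 10., 100)
#     data = np.exp(-(axis - 5.) ** 2)
#     shifted = _shift1D(data, shift=0.5, original_axis=axis, fill_value=0.)
#
# which moves the peak by +0.5 axis units and fills the uncovered edge with 0.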
class Signal1D(BaseSignal, CommonSignal1D):
"""
"""
_signal_dimension = 1
def __init__(self, *args, **kwargs):
if kwargs.get('ragged', False):
raise ValueError("Signal1D can't be ragged.")
super().__init__(*args, **kwargs)
if self.axes_manager.signal_dimension != 1:
self.axes_manager.set_signal_dimension(1)
def _get_spikes_diagnosis_histogram_data(self, signal_mask=None,
navigation_mask=None,
**kwargs):
self._check_signal_dimension_equals_one()
dc = self.data
axis = self.axes_manager.signal_axes[0].axis
if signal_mask is not None:
dc = dc[..., ~signal_mask]
axis = axis[~signal_mask]
if navigation_mask is not None:
dc = dc[~navigation_mask, :]
der = np.abs(np.gradient(dc, axis, axis=-1))
n = ((~navigation_mask).sum() if navigation_mask else
self.axes_manager.navigation_size)
# arbitrary cutoff for number of spectra necessary before histogram
# data is compressed by finding maxima of each spectrum
tmp = BaseSignal(der) if n < 2000 else BaseSignal(
np.ravel(der.max(-1)))
# get histogram signal using smart binning and plot
return tmp.get_histogram(**kwargs)
def spikes_diagnosis(self, signal_mask=None,
navigation_mask=None,
**kwargs):
"""Plots a histogram to help in choosing the threshold for
spikes removal.
Parameters
----------
%s
%s
**kwargs : dict
Keyword arguments pass to
:py:meth:`~hyperspy.signal.signal.BaseSignal.get_histogram`
See also
--------
spikes_removal_tool
"""
tmph = self._get_spikes_diagnosis_histogram_data(signal_mask,
navigation_mask,
**kwargs)
tmph.plot()
# Customize plot appearance
plt.gca().set_title('')
plt.gca().fill_between(tmph.axes_manager[0].axis,
tmph.data,
facecolor='#fddbc7',
interpolate=True,
color='none')
ax = tmph._plot.signal_plot.ax
axl = tmph._plot.signal_plot.ax_lines[0]
axl.set_line_properties(color='#b2182b')
plt.xlabel('Derivative magnitude')
plt.ylabel('Log(Counts)')
ax.set_yscale('log')
ax.set_ylim(10 ** -1, plt.ylim()[1])
ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
plt.draw()
spikes_diagnosis.__doc__ %= (SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
def spikes_removal_tool(self, signal_mask=None, navigation_mask=None,
threshold='auto', interactive=True,
display=True, toolkit=None, **kwargs):
self._check_signal_dimension_equals_one()
if interactive:
sr = SpikesRemovalInteractive(self,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
threshold=threshold)
return sr.gui(display=display, toolkit=toolkit)
else:
sr = SpikesRemoval(self,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
threshold=threshold, **kwargs)
sr.remove_all_spikes()
return sr
spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (
SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG, "", DISPLAY_DT, TOOLKIT_DT,)
def create_model(self, dictionary=None):
"""Create a model for the current data.
Returns
-------
model : `Model1D` instance.
"""
model = Model1D(self, dictionary=dictionary)
return model
def shift1D(
self,
shift_array,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Shift the data in place over the signal axis by the amount specified
by an array.
Parameters
----------
shift_array : BaseSignal or np.array
An array containing the shifting amount. It must have the same
`axes_manager._navigation_shape`
`axes_manager._navigation_shape_in_array` shape.
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
NotImplementedError
If the signal axis is a non-uniform axis.
"""
if not np.any(shift_array):
            # Nothing to do, the shift array is filled with zeros
return
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
if not axis.is_uniform:
raise NotImplementedError(
"This operation is not implemented for non-uniform axes.")
# Figure out min/max shifts, and translate to shifts in index as well
minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
if minimum < 0:
ihigh = 1 + axis.value2index(
axis.high_value + minimum,
rounding=math.floor)
else:
ihigh = axis.high_index + 1
if maximum > 0:
ilow = axis.value2index(axis.offset + maximum,
rounding=math.ceil)
else:
ilow = axis.low_index
if expand:
if self._lazy:
ind = axis.index_in_array
pre_shape = list(self.data.shape)
post_shape = list(self.data.shape)
pre_chunks = list(self.data.chunks)
post_chunks = list(self.data.chunks)
pre_shape[ind] = axis.high_index - ihigh + 1
post_shape[ind] = ilow - axis.low_index
for chunks, shape in zip((pre_chunks, post_chunks),
(pre_shape, post_shape)):
maxsize = min(np.max(chunks[ind]), shape[ind])
num = np.ceil(shape[ind] / maxsize)
chunks[ind] = tuple(len(ar) for ar in
np.array_split(np.arange(shape[ind]),
num))
pre_array = da.full(tuple(pre_shape),
fill_value,
chunks=tuple(pre_chunks))
post_array = da.full(tuple(post_shape),
fill_value,
chunks=tuple(post_chunks))
self.data = da.concatenate((pre_array, self.data, post_array),
axis=ind).rechunk({ind:-1})
else:
padding = []
for i in range(self.data.ndim):
if i == axis.index_in_array:
padding.append((axis.high_index - ihigh + 1,
ilow - axis.low_index))
else:
padding.append((0, 0))
self.data = np.pad(self.data, padding, mode='constant',
constant_values=(fill_value,))
axis.offset += minimum
axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
if isinstance(shift_array, np.ndarray):
shift_array = BaseSignal(shift_array.squeeze()).T
self.map(_shift1D,
shift=shift_array,
original_axis=axis.axis,
fill_value=fill_value,
kind=interpolation_method,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers,
ragged=False)
if crop and not expand:
_logger.debug("Cropping %s from index %i to %i"
% (self, ilow, ihigh))
self.crop(axis.index_in_axes_manager,
ilow,
ihigh)
self.events.data_changed.trigger(obj=self)
shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def interpolate_in_between(
self,
start,
end,
delta=3,
show_progressbar=None,
parallel=None,
max_workers=None,
**kwargs,
):
"""Replace the data in a given range by interpolation.
The operation is performed in place.
Parameters
----------
start, end : int or float
The limits of the interval. If int, they are taken as the
axis index. If float, they are taken as the axis value.
delta : int or float
The windows around the (start, end) to use for interpolation. If
int, they are taken as index steps. If float, they are taken in
units of the axis value.
%s
%s
%s
**kwargs :
All extra keyword arguments are passed to
:py:func:`scipy.interpolate.interp1d`. See the function documentation
for details.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
i1 = axis._get_index(start)
i2 = axis._get_index(end)
if isinstance(delta, float):
if isinstance(start, int):
start = axis.axis[start]
if isinstance(end, int):
end = axis.axis[end]
i0 = axis._get_index(start-delta) if start-delta < axis.low_value else 0
i3 = axis._get_index(end+delta) if end+delta > axis.high_value else axis.size
else:
i0 = int(np.clip(i1 - delta, 0, np.inf))
i3 = int(np.clip(i2 + delta, 0, axis.size))
def interpolating_function(dat):
dat_int = interpolate.interp1d(
list(range(i0, i1)) + list(range(i2, i3)),
dat[i0:i1].tolist() + dat[i2:i3].tolist(),
**kwargs)
dat[i1:i2] = dat_int(list(range(i1, i2)))
return dat
self.map(
interpolating_function,
ragged=False,
parallel=parallel,
show_progressbar=show_progressbar,
max_workers=max_workers,
)
self.events.data_changed.trigger(obj=self)
interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
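    # Hedged usage sketch (illustrative only): replacing an artefact between
    # two hypothetical axis values by interpolation, assuming `s` is a Signal1D.
    #
    #     s.interpolate_in_between(512., 520., delta=5., kind='cubic')
    #
    # The `kind` keyword is forwarded to scipy.interpolate.interp1d.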
def estimate_shift1D(
self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
show_progressbar=None,
parallel=None,
max_workers=None,
):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient to
select the feature of interest providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : `BaseSignal` of bool.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
%s
%s
Returns
-------
An array with the result of the estimation in the axis units.
Although the computation is performed in batches if the signal is
lazy, the result is computed in memory because it depends on the
current state of the axes that could change later on in the workflow.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
NotImplementedError
If the signal axis is a non-uniform axis.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
if not axis.is_uniform:
raise NotImplementedError(
"The function is not implemented for non-uniform signal axes.")
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
shift_signal = self.map(
_estimate_shift1D,
mask=mask,
data_slice=slice(i1, i2),
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,
max_workers=max_workers,
)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array = shift_array * axis.scale
if shift_signal._lazy:
# We must compute right now because otherwise any changes to the
# axes_manager of the signal later in the workflow may result in
# a wrong shift_array
shift_array = shift_array.compute()
return shift_array
estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def align1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
also_align=None,
mask=None,
show_progressbar=None,
iterpath="flyback"):
"""Estimate the shifts in the signal axis using
cross-correlation and use the estimation to align the data in place.
This method can only estimate the shift by comparing
unidimensional
features that should not change the position.
To decrease memory usage, time of computation and improve
accuracy it is convenient to select the feature of interest
setting the `start` and `end` keywords. By default interpolation is
used to obtain subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
also_align : list of signals, None
A list of BaseSignal instances that has exactly the same
dimensions as this one and that will be aligned using the shift map
estimated using the this signal.
mask : `BaseSignal` or bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
Returns
-------
An array with the result of the estimation.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
See also
--------
estimate_shift1D
"""
if also_align is None:
also_align = []
self._check_signal_dimension_equals_one()
if self._lazy:
_logger.warning('In order to properly expand, the lazy '
'reference signal will be read twice (once to '
'estimate shifts, and second time to shift '
'appropriately), which might take a long time. '
'Use expand=False to only pass through the data '
'once.')
with self.axes_manager.switch_iterpath(iterpath):
shift_array = self.estimate_shift1D(
start=start,
end=end,
reference_indices=reference_indices,
max_shift=max_shift,
interpolate=interpolate,
number_of_interpolation_points=number_of_interpolation_points,
mask=mask,
show_progressbar=show_progressbar)
signals_to_shift = [self] + also_align
for signal in signals_to_shift:
signal.shift1D(shift_array=shift_array,
interpolation_method=interpolation_method,
crop=crop,
fill_value=fill_value,
expand=expand,
show_progressbar=show_progressbar)
align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)
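    # Hedged usage sketch (illustrative only): aligning a stack of spectra on a
    # feature between two hypothetical axis values, assuming `s` is a Signal1D
    # and `other_signal` is any signal with the same navigation dimensions.
    #
    #     shifts = s.estimate_shift1D(start=430., end=470.)  # inspect first
    #     s.align1D(start=430., end=470., also_align=[other_signal])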
def integrate_in_range(self, signal_range='interactive',
display=True, toolkit=None):
"""Sums the spectrum over an energy range, giving the integrated
area.
The energy range can either be selected through a GUI or the command
line.
Parameters
----------
signal_range : a tuple of this form (l, r) or "interactive"
l and r are the left and right limits of the range. They can be
numbers or None, where None indicates the extremes of the interval.
If l and r are floats the `signal_range` will be in axis units (for
example eV). If l and r are integers the `signal_range` will be in
index units. When `signal_range` is "interactive" (default) the
range is selected using a GUI. Note that ROIs can be used
in place of a tuple.
Returns
--------
integrated_spectrum : `BaseSignal` subclass
See Also
--------
integrate_simpson
Examples
--------
Using the GUI
>>> s = hs.signals.Signal1D(range(1000))
>>> s.integrate_in_range() #doctest: +SKIP
Using the CLI
>>> s_int = s.integrate_in_range(signal_range=(560,None))
Selecting a range in the axis units, by specifying the
signal range with floats.
>>> s_int = s.integrate_in_range(signal_range=(560.,590.))
Selecting a range using the index, by specifying the
signal range with integers.
>>> s_int = s.integrate_in_range(signal_range=(100,120))
"""
from hyperspy.misc.utils import deprecation_warning
msg = (
"The `Signal1D.integrate_in_range` method is deprecated and will "
"be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
"instead.")
deprecation_warning(msg)
if signal_range == 'interactive':
self_copy = self.deepcopy()
ia = IntegrateArea(self_copy, signal_range)
ia.gui(display=display, toolkit=toolkit)
integrated_signal1D = self_copy
else:
integrated_signal1D = self._integrate_in_range_commandline(
signal_range)
return integrated_signal1D
def _integrate_in_range_commandline(self, signal_range):
e1 = signal_range[0]
e2 = signal_range[1]
integrated_signal1D = self.isig[e1:e2].integrate1D(-1)
return integrated_signal1D
def calibrate(self, display=True, toolkit=None):
"""
Calibrate the spectral dimension using a gui.
It displays a window where the new calibration can be set by:
* setting the values of offset, units and scale directly
* or selecting a range by dragging the mouse on the spectrum figure
and setting the new values for the given range limits
Parameters
----------
%s
%s
Notes
-----
For this method to work the output_dimension must be 1.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
NotImplementedError
If called with a non-uniform axes.
"""
self._check_signal_dimension_equals_one()
calibration = Signal1DCalibration(self)
return calibration.gui(display=display, toolkit=toolkit)
calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def smooth_savitzky_golay(
self,
polynomial_order=None,
window_length=None,
differential_order=0,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Apply a Savitzky-Golay filter to the data in place.
If `polynomial_order` or `window_length` or `differential_order` are
None the method is run in interactive mode.
Parameters
----------
polynomial_order : int, optional
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
window_length : int, optional
The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer.
differential_order: int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
%s
%s
%s
%s
Raises
------
NotImplementedError
If the signal axis is a non-uniform axis.
Notes
-----
More information about the filter in `scipy.signal.savgol_filter`.
"""
self._check_signal_dimension_equals_one()
if not self.axes_manager.signal_axes[0].is_uniform:
            raise NotImplementedError(
                "This functionality is not implemented for signals with "
                "non-uniform axes. Consider using `smooth_lowess` instead.")
if (polynomial_order is not None and
window_length is not None):
axis = self.axes_manager.signal_axes[0]
self.map(savgol_filter, window_length=window_length,
polyorder=polynomial_order, deriv=differential_order,
delta=axis.scale, ragged=False, parallel=parallel, max_workers=max_workers)
else:
# Interactive mode
smoother = SmoothingSavitzkyGolay(self)
smoother.differential_order = differential_order
if polynomial_order is not None:
smoother.polynomial_order = polynomial_order
if window_length is not None:
smoother.window_length = window_length
return smoother.gui(display=display, toolkit=toolkit)
smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
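    # Hedged usage sketch (illustrative only): in-place Savitzky-Golay
    # smoothing with example parameters, assuming `s` is a Signal1D.
    #
    #     s.smooth_savitzky_golay(polynomial_order=3, window_length=11)
    #
    # Omitting both parameters opens the interactive smoothing tool instead.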
def smooth_lowess(
self,
smoothing_parameter=None,
number_of_iterations=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Lowess data smoothing in place.
If `smoothing_parameter` or `number_of_iterations` are None the method
is run in interactive mode.
Parameters
----------
smoothing_parameter: float or None
Between 0 and 1. The fraction of the data used
when estimating each y-value.
number_of_iterations: int or None
The number of residual-based reweightings
to perform.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None or number_of_iterations is None:
smoother = SmoothingLowess(self)
if smoothing_parameter is not None:
smoother.smoothing_parameter = smoothing_parameter
if number_of_iterations is not None:
smoother.number_of_iterations = number_of_iterations
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(lowess,
x=self.axes_manager[-1].axis,
f=smoothing_parameter,
n_iter=number_of_iterations,
show_progressbar=show_progressbar,
ragged=False,
parallel=parallel,
max_workers=max_workers)
smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
def smooth_tv(
self,
smoothing_parameter=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Total variation data smoothing in place.
Parameters
----------
smoothing_parameter: float or None
Denoising weight relative to L2 minimization. If None the method
is run in interactive mode.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
NotImplementedError
If the signal axis is a non-uniform axis.
"""
self._check_signal_dimension_equals_one()
if not self.axes_manager.signal_axes[0].is_uniform:
            raise NotImplementedError(
                "This functionality is not implemented for signals with "
                "non-uniform axes. Consider using `smooth_lowess` instead.")
if smoothing_parameter is None:
smoother = SmoothingTV(self)
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(_tv_denoise_1d, weight=smoothing_parameter,
ragged=False,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers)
smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
def filter_butterworth(self,
cutoff_frequency_ratio=None,
type='low',
order=2, display=True, toolkit=None):
"""
Butterworth filter in place.
Parameters
----------
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
NotImplementedError
If the signal axis is a non-uniform axis.
"""
if not self.axes_manager.signal_axes[0].is_uniform:
            raise NotImplementedError(
                "This functionality is not implemented for signals with "
                "non-uniform axes. Consider using `smooth_lowess` instead.")
self._check_signal_dimension_equals_one()
smoother = ButterworthFilter(self)
if cutoff_frequency_ratio is not None:
smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
smoother.type = type
smoother.order = order
smoother.apply()
else:
return smoother.gui(display=display, toolkit=toolkit)
filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
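    # Illustrative usage sketch (not part of the original source; `s` is assumed
    # to be a Signal1D with a uniform signal axis): passing the cutoff applies
    # the filter in place, omitting it opens the interactive GUI.
    #   >>> s.filter_butterworth(cutoff_frequency_ratio=0.2, type='low', order=2)
    #   >>> s.filter_butterworth()  # interactive mode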
def _remove_background_cli(
self, signal_range, background_estimator, fast=True,
zero_fill=False, show_progressbar=None, model=None,
return_model=False):
""" See :py:meth:`~hyperspy._signal1d.signal1D.remove_background`. """
if model is None:
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if background_estimator not in model:
model.append(background_estimator)
background_estimator.estimate_parameters(
self,
signal_range[0],
signal_range[1],
only_current=False)
if not fast:
model.set_signal_range(signal_range[0], signal_range[1])
model.multifit(show_progressbar=show_progressbar,
iterpath='serpentine')
model.reset_signal_range()
if self._lazy:
result = self - model.as_signal(show_progressbar=show_progressbar)
else:
try:
axis = self.axes_manager.signal_axes[0]
if is_binned(self):
# in v2 replace by
# if axis.is_binned:
if axis.is_uniform:
scale_factor = axis.scale
else:
scale_factor = np.gradient(axis.axis)
else:
scale_factor = 1
bkg = background_estimator.function_nd(axis.axis) * scale_factor
result = self - bkg
except MemoryError:
result = self - model.as_signal(
show_progressbar=show_progressbar)
if zero_fill:
if self._lazy:
low_idx = result.axes_manager[-1].value2index(signal_range[0])
z = da.zeros(low_idx, chunks=(low_idx,))
cropped_da = result.data[low_idx:]
result.data = da.concatenate([z, cropped_da])
else:
result.isig[:signal_range[0]] = 0
if return_model:
if fast:
# Calculate the variance for each navigation position only when
# using fast, otherwise the chisq is already calculated when
# doing the multifit
d = result.data[..., np.where(model.channel_switches)[0]]
variance = model._get_variance(only_current=False)
d *= d / (1. * variance) # d = difference^2 / variance.
model.chisq.data = d.sum(-1)
result = (result, model)
return result
def remove_background(
self,
signal_range='interactive',
background_type='Power law',
polynomial_order=2,
fast=True,
zero_fill=False,
plot_remainder=True,
show_progressbar=None,
return_model=False,
display=True,
toolkit=None):
"""
Remove the background, either in place using a GUI or returned as a new
spectrum using the command line. The fast option is not accurate for
most background types - except Gaussian, Offset and
Power law - but it is useful to estimate the initial fitting parameters
before performing a full fit.
Parameters
----------
signal_range : "interactive", tuple of ints or floats, optional
If this argument is not specified, the signal range has to be
            selected using a GUI, and the original spectrum will be
            replaced in place. If a tuple is given, a background-subtracted
            spectrum is returned instead.
background_type : str
The type of component which should be used to fit the background.
Possible components: Doniach, Gaussian, Lorentzian, Offset,
Polynomial, PowerLaw, Exponential, SkewNormal, SplitVoigt, Voigt.
If Polynomial is used, the polynomial order can be specified
polynomial_order : int, default 2
Specify the polynomial order if a Polynomial background is used.
fast : bool
If True, perform an approximative estimation of the parameters.
If False, the signal is fitted using non-linear least squares
afterwards. This is slower compared to the estimation but
often more accurate.
zero_fill : bool
If True, all spectral channels lower than the lower bound of the
fitting range will be set to zero (this is the default behavior
of Gatan's DigitalMicrograph). Setting this value to False
allows for inspection of the quality of background fit throughout
the pre-fitting region.
plot_remainder : bool
If True, add a (green) line previewing the remainder signal after
            background removal. This preview is obtained from the fast
            estimation, so the result may differ if a non-linear least
            squares (NLLS) fit is finally performed.
return_model : bool
If True, the background model is returned. The chi² can be obtained
from this model using
:py:meth:`~hyperspy.models.model1d.Model1D.chisqd`.
%s
%s
%s
Returns
-------
{None, signal, background_model or (signal, background_model)}
If signal_range is not 'interactive', the signal with background
subtracted is returned. If return_model is True, returns the
background model, otherwise, the GUI widget dictionary is returned
if `display=False` - see the display parameter documentation.
Examples
--------
Using GUI, replaces spectrum s
>>> s = hs.signals.Signal1D(range(1000))
>>> s.remove_background() #doctest: +SKIP
Using command line, returns a Signal1D:
>>> s.remove_background(signal_range=(400,450),
background_type='PowerLaw')
<Signal1D, title: , dimensions: (|1000)>
Using a full model to fit the background:
>>> s.remove_background(signal_range=(400,450), fast=False)
<Signal1D, title: , dimensions: (|1000)>
Returns background subtracted and the model:
>>> s.remove_background(signal_range=(400,450),
fast=False,
return_model=True)
(<Signal1D, title: , dimensions: (|1000)>, <Model1D>)
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
# Create model here, so that we can return it
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if signal_range == 'interactive':
br = BackgroundRemoval(self, background_type=background_type,
polynomial_order=polynomial_order,
fast=fast,
plot_remainder=plot_remainder,
show_progressbar=show_progressbar,
zero_fill=zero_fill,
model=model)
gui_dict = br.gui(display=display, toolkit=toolkit)
if return_model:
return model
else:
# for testing purposes
return gui_dict
else:
background_estimator = _get_background_estimator(
background_type, polynomial_order)[0]
result = self._remove_background_cli(
signal_range=signal_range,
background_estimator=background_estimator,
fast=fast,
zero_fill=zero_fill,
show_progressbar=show_progressbar,
model=model,
return_model=return_model)
return result
remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)
@interactive_range_selector
def crop_signal1D(self, left_value=None, right_value=None,):
"""Crop in place the spectral dimension.
Parameters
----------
left_value, right_value : int, float or None
If int the values are taken as indices. If float they are
converted to indices using the spectral axis calibration.
If left_value is None crops from the beginning of the axis.
If right_value is None crops up to the end of the axis. If
both are None the interactive cropping interface is activated
enabling cropping the spectrum using a span selector in the
signal plot.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
try:
left_value, right_value = left_value
except TypeError:
# It was not a ROI, we carry on
pass
self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
start=left_value, end=right_value)
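    # Illustrative usage sketch (not part of the original source): integers crop
    # by channel index, floats crop by calibrated axis value.
    #   >>> s.crop_signal1D(100, 400)   # indices
    #   >>> s.crop_signal1D(1.5, 3.2)   # axis values
    #   >>> s.crop_signal1D()           # interactive span selector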
def gaussian_filter(self, FWHM):
"""Applies a Gaussian filter in the spectral dimension in place.
Parameters
----------
FWHM : float
The Full Width at Half Maximum of the gaussian in the
spectral axis units
Raises
------
ValueError
If FWHM is equal or less than zero.
SignalDimensionError
If the signal dimension is not 1.
NotImplementedError
If the signal axis is a non-uniform axis.
"""
self._check_signal_dimension_equals_one()
for _axis in self.axes_manager.signal_axes:
if not _axis.is_uniform:
raise NotImplementedError(
"The function is not implemented for non-uniform axes.")
if FWHM <= 0:
raise ValueError(
"FWHM must be greater than zero")
axis = self.axes_manager.signal_axes[0]
FWHM *= 1 / axis.scale
self.map(gaussian_filter1d, sigma=FWHM / 2.35482, ragged=False)
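    # Illustrative usage sketch (not part of the original source): the FWHM is
    # given in calibrated axis units; internally it is converted to channels and
    # to a sigma (FWHM / scale / 2.35482) before applying scipy's
    # gaussian_filter1d to every spectrum.
    #   >>> s.gaussian_filter(FWHM=2.0)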
def hanning_taper(self, side='both', channels=None, offset=0):
"""Apply a hanning taper to the data in place.
Parameters
----------
side : 'left', 'right' or 'both'
Specify which side to use.
channels : None or int
            The number of channels to taper. If None, 2% of the total
            number of channels (with a minimum of 20) are tapered.
        offset : int
            Number of channels at the tapered side(s) that are set to
            zero before the taper is applied.
Returns
-------
channels
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.issubdtype(self.data.dtype, np.floating):
raise TypeError("The data dtype should be `float`. It can be "
"changed by using the `change_dtype('float')` "
"method of the signal.")
# TODO: generalize it
self._check_signal_dimension_equals_one()
if channels is None:
channels = int(round(len(self()) * 0.02))
if channels < 20:
channels = 20
dc = self._data_aligned_with_axes
if self._lazy and offset != 0:
shp = dc.shape
if len(shp) == 1:
nav_shape = ()
nav_chunks = ()
else:
nav_shape = shp[:-1]
nav_chunks = dc.chunks[:-1]
zeros = da.zeros(nav_shape + (offset,),
chunks=nav_chunks + ((offset,),))
if side == 'left' or side == 'both':
if self._lazy:
tapered = dc[..., offset:channels + offset]
tapered *= np.hanning(2 * channels)[:channels]
therest = dc[..., channels + offset:]
thelist = [] if offset == 0 else [zeros]
thelist.extend([tapered, therest])
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., offset:channels + offset] *= (
np.hanning(2 * channels)[:channels])
dc[..., :offset] *= 0.
if side == 'right' or side == 'both':
rl = None if offset == 0 else -offset
if self._lazy:
therest = dc[..., :-channels - offset]
tapered = dc[..., -channels - offset:rl]
tapered *= np.hanning(2 * channels)[-channels:]
thelist = [therest, tapered]
if offset != 0:
thelist.append(zeros)
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., -channels - offset:rl] *= (
np.hanning(2 * channels)[-channels:])
if offset != 0:
dc[..., -offset:] *= 0.
if self._lazy:
self.data = dc
self.events.data_changed.trigger(obj=self)
return channels
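    # Illustrative usage sketch (not part of the original source; requires a
    # float dtype): taper 30 channels on both ends, zeroing the outermost 5
    # channels on each side before the taper starts.
    #   >>> s.change_dtype('float')
    #   >>> s.hanning_taper(side='both', channels=30, offset=5)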
def find_peaks1D_ohaver(self, xdim=None,
slope_thresh=0,
amp_thresh=None,
subchannel=True,
medfilt_radius=5,
maxpeakn=30000,
peakgroup=10,
parallel=None,
max_workers=None):
"""Find positive peaks along a 1D Signal. It detects peaks by looking
for downward zero-crossings in the first derivative that exceed
'slope_thresh'.
'slope_thresh' and 'amp_thresh', control sensitivity: higher
values will neglect broad peaks (slope) and smaller features (amp),
respectively.
`peakgroup` is the number of points around the top of the peak
that are taken to estimate the peak height. For spikes or very
narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,
make `peakgroup` larger to reduce the effect of noise.
Parameters
----------
slope_thresh : float, optional
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float, optional
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10%% of max(y).
medfilt_radius : int, optional
median filter window to apply to smooth the data
(see :py:func:`scipy.signal.medfilt`);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int, optional
number of points around the "top part" of the peak
that are taken to estimate the peak height;
default is set to 10
        maxpeakn : int, optional
            number of maximum detectable peaks;
            default is set to 30000.
        subchannel : bool, default True
            whether to use subchannel (interpolated) precision when
            locating the peak positions; default is set to True.
%s
%s
Returns
-------
structured array of shape (npeaks) containing fields: 'position',
'width', and 'height' for each peak.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
# TODO: add scipy.signal.find_peaks_cwt
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0].axis
peaks = self.map(find_peaks_ohaver,
x=axis,
slope_thresh=slope_thresh,
amp_thresh=amp_thresh,
medfilt_radius=medfilt_radius,
maxpeakn=maxpeakn,
peakgroup=peakgroup,
subchannel=subchannel,
ragged=True,
parallel=parallel,
max_workers=max_workers,
inplace=False,
lazy_output=False)
return peaks.data
find_peaks1D_ohaver.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG)
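    # Illustrative usage sketch (not part of the original source): per navigation
    # position the result is a structured array with 'position', 'height' and
    # 'width' fields.
    #   >>> peaks = s.find_peaks1D_ohaver(slope_thresh=0.0, amp_thresh=0.1)
    #   >>> peaks[0]['position']  # peak positions at the first navigation index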
def estimate_peak_width(
self,
factor=0.5,
window=None,
return_interval=False,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Estimate the width of the highest intensity of peak
of the spectra at a given fraction of its maximum.
It can be used with asymmetric peaks. For accurate results any
background must be previously subtracted.
The estimation is performed by interpolation using cubic splines.
Parameters
----------
factor : 0 < float < 1
The default, 0.5, estimates the FWHM.
window : None or float
The size of the window centred at the peak maximum
used to perform the estimation.
The window size must be chosen with care: if it is narrower
than the width of the peak at some positions or if it is
so wide that it includes other more intense peaks this
method cannot compute the width and a NaN is stored instead.
return_interval: bool
If True, returns 2 extra signals with the positions of the
desired height fraction at the left and right of the
peak.
%s
%s
%s
Returns
-------
width or [width, left, right], depending on the value of
`return_interval`.
Notes
-----
Parallel operation of this function is not supported
on Windows platforms.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
if not 0 < factor < 1:
raise ValueError("factor must be between 0 and 1.")
if parallel != False and os.name in ["nt", "dos"]: # pragma: no cover
# Due to a scipy bug where scipy.interpolate.UnivariateSpline
# appears to not be thread-safe on Windows, we raise a warning
# here. See https://github.com/hyperspy/hyperspy/issues/2320
# Until/if the scipy bug is fixed, we should do this.
_logger.warning(
"Parallel operation is not supported on Windows. "
"Setting `parallel=False`"
)
parallel = False
# axis is a keyword already used by self.map so calling this axis_arg
# to avoid "parameter collision
axis_arg = self.axes_manager.signal_axes[0]
maxval = self.axes_manager.navigation_size
show_progressbar = show_progressbar and maxval > 0
def estimating_function(spectrum,
window=None,
factor=0.5,
axis_arg=None):
x = axis_arg.axis
if window is not None:
vmax = axis_arg.index2value(spectrum.argmax())
slices = axis_arg._get_array_slices(
slice(vmax - window * 0.5, vmax + window * 0.5))
spectrum = spectrum[slices]
x = x[slices]
spline = interpolate.UnivariateSpline(
x,
spectrum - factor * spectrum.max(),
s=0)
roots = spline.roots()
if len(roots) == 2:
return np.array(roots)
else:
return np.full((2,), np.nan)
both = self.map(
estimating_function,
window=window,
factor=factor,
axis_arg=axis_arg,
ragged=False,
inplace=False,
parallel=parallel,
show_progressbar=show_progressbar,
max_workers=max_workers,
)
left, right = both.T.split()
width = right - left
if factor == 0.5:
width.metadata.General.title = (
self.metadata.General.title + " FWHM")
left.metadata.General.title = (
self.metadata.General.title + " FWHM left position")
right.metadata.General.title = (
self.metadata.General.title + " FWHM right position")
else:
width.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum" % factor)
left.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum left position" % factor)
right.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum right position" % factor)
for signal in (left, width, right):
signal.axes_manager.set_signal_dimension(0)
signal.set_signal_type("")
if return_interval is True:
return [width, left, right]
else:
return width
estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
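    # Illustrative usage sketch (not part of the original source): with the
    # default factor=0.5 the returned signal holds the FWHM for each navigation
    # position; return_interval=True also returns the left and right crossings.
    #   >>> width = s.estimate_peak_width(factor=0.5, window=10.0)
    #   >>> width, left, right = s.estimate_peak_width(return_interval=True)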
def plot(self,
navigator="auto",
plot_markers=True,
autoscale='v',
norm="auto",
axes_manager=None,
navigator_kwds={},
**kwargs):
"""%s
%s
%s
"""
for c in autoscale:
if c not in ['x', 'v']:
raise ValueError("`autoscale` only accepts 'x', 'v' as "
"valid characters.")
super().plot(navigator=navigator,
plot_markers=plot_markers,
autoscale=autoscale,
norm=norm,
axes_manager=axes_manager,
navigator_kwds=navigator_kwds,
**kwargs)
plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING)
class LazySignal1D(LazySignal, Signal1D):
"""
"""
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.axes_manager.set_signal_dimension(1)
|
jat255/hyperspy
|
hyperspy/_signals/signal1d.py
|
Python
|
gpl-3.0
| 64,370
|
[
"Gaussian"
] |
b166d595fb65d8c96b0bde5117af76eca1e54d3f1f9de58135ba22993a0f170f
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Utilities and wrappers around inspect module"""
from __future__ import print_function
import inspect
import re
# Local imports:
from spyderlib.utils import encoding
from spyderlib.py3compat import (is_text_string, builtins, get_meth_func,
get_meth_class_inst, get_meth_class,
get_func_defaults, to_text_string)
SYMBOLS = r"[^\'\"a-zA-Z0-9_.]"
def getobj(txt, last=False):
"""Return the last valid object name in string"""
txt_end = ""
for startchar, endchar in ["[]", "()"]:
if txt.endswith(endchar):
pos = txt.rfind(startchar)
if pos:
txt_end = txt[pos:]
txt = txt[:pos]
tokens = re.split(SYMBOLS, txt)
token = None
try:
while token is None or re.match(SYMBOLS, token):
token = tokens.pop()
if token.endswith('.'):
token = token[:-1]
if token.startswith('.'):
# Invalid object name
return None
if last:
#XXX: remove this statement as well as the "last" argument
token += txt[ txt.rfind(token) + len(token) ]
token += txt_end
if token:
return token
except IndexError:
return None
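# Illustrative examples (not part of the original source; expected values follow
# from the logic above): getobj extracts the trailing valid object name from a
# line of code being edited.
#   >>> getobj("x = numpy.linspace")   # -> 'numpy.linspace'
#   >>> getobj("foo(bar.baz")          # -> 'bar.baz'
#   >>> getobj(".keys")                # -> None (leading dot is invalid)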
def getobjdir(obj):
"""
For standard objects, will simply return dir(obj)
In special cases (e.g. WrapITK package), will return only string elements
of result returned by dir(obj)
"""
return [item for item in dir(obj) if is_text_string(item)]
def getdoc(obj):
"""
Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
        Its argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
        Its docstring
"""
docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''
# Most of the time doc will only contain ascii characters, but there are
# some docstrings that contain non-ascii characters. Not all source files
# declare their encoding in the first line, so querying for that might not
# yield anything, either. So assume the most commonly used
# multi-byte file encoding (which also covers ascii).
try:
docstring = to_text_string(docstring)
except:
pass
# Doc dict keys
doc = {'name': '',
'argspec': '',
'note': '',
'docstring': docstring}
if callable(obj):
try:
name = obj.__name__
except AttributeError:
doc['docstring'] = docstring
return doc
if inspect.ismethod(obj):
imclass = get_meth_class(obj)
if get_meth_class_inst(obj) is not None:
doc['note'] = 'Method of %s instance' \
% get_meth_class_inst(obj).__class__.__name__
else:
doc['note'] = 'Unbound %s method' % imclass.__name__
obj = get_meth_func(obj)
elif hasattr(obj, '__module__'):
doc['note'] = 'Function of %s module' % obj.__module__
else:
doc['note'] = 'Function'
doc['name'] = obj.__name__
if inspect.isfunction(obj):
args, varargs, varkw, defaults = inspect.getargspec(obj)
doc['argspec'] = inspect.formatargspec(args, varargs, varkw,
defaults,
formatvalue=lambda o:'='+repr(o))
if name == '<lambda>':
doc['name'] = name + ' lambda '
doc['argspec'] = doc['argspec'][1:-1] # remove parentheses
else:
argspec = getargspecfromtext(doc['docstring'])
if argspec:
doc['argspec'] = argspec
# Many scipy and numpy docstrings begin with a function
            # signature on the first line. This ends up being redundant
# when we are using title and argspec to create the
# rich text "Definition:" field. We'll carefully remove this
# redundancy but only under a strict set of conditions:
            # Remove the starting characters of the 'doc' portion *iff*
# the non-whitespace characters on the first line
# match *exactly* the combined function title
# and argspec we determined above.
signature = doc['name'] + doc['argspec']
docstring_blocks = doc['docstring'].split("\n\n")
first_block = docstring_blocks[0].strip()
if first_block == signature:
doc['docstring'] = doc['docstring'].replace(
signature, '', 1).lstrip()
else:
doc['argspec'] = '(...)'
# Remove self from argspec
argspec = doc['argspec']
doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')
return doc
def getsource(obj):
"""Wrapper around inspect.getsource"""
try:
try:
src = encoding.to_unicode( inspect.getsource(obj) )
except TypeError:
if hasattr(obj, '__class__'):
src = encoding.to_unicode( inspect.getsource(obj.__class__) )
else:
# Bindings like VTK or ITK require this case
src = getdoc(obj)
return src
except (TypeError, IOError):
return
def getsignaturefromtext(text, objname):
"""Get object signatures from text (object documentation)
Return a list containing a single string in most cases
Example of multiple signatures: PyQt4 objects"""
if isinstance(text, dict):
text = text.get('docstring', '')
# Regexps
oneline_re = objname + r'\([^\)].+?(?<=[\w\]\}\'"])\)(?!,)'
multiline_re = objname + r'\([^\)]+(?<=[\w\]\}\'"])\)(?!,)'
multiline_end_parenleft_re = r'(%s\([^\)]+(\),\n.+)+(?<=[\w\]\}\'"])\))'
# Grabbing signatures
if not text:
text = ''
sigs_1 = re.findall(oneline_re + '|' + multiline_re, text)
sigs_2 = [g[0] for g in re.findall(multiline_end_parenleft_re % objname, text)]
all_sigs = sigs_1 + sigs_2
# The most relevant signature is usually the first one. There could be
# others in doctests but those are not so important
if all_sigs:
return all_sigs[0]
else:
return ''
# Fix for Issue 1953
# TODO: Add more signatures and remove this hack in 2.4
getsignaturesfromtext = getsignaturefromtext
def getargspecfromtext(text):
"""
Try to get the formatted argspec of a callable from the first block of its
docstring
This will return something like
'(foo, bar, k=1)'
"""
blocks = text.split("\n\n")
first_block = blocks[0].strip()
return getsignaturefromtext(first_block, '')
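# Illustrative example (not part of the original source): many numpy-style
# docstrings start with a signature line, which these helpers pick up.
#   >>> doc = "take(a, indices, axis=None)\n\nTake elements from an array."
#   >>> getsignaturefromtext(doc, "take")  # -> 'take(a, indices, axis=None)'
#   >>> getargspecfromtext(doc)            # -> '(a, indices, axis=None)'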
def getargsfromtext(text, objname):
"""Get arguments from text (object documentation)"""
signature = getsignaturefromtext(text, objname)
if signature:
argtxt = signature[signature.find('(')+1:-1]
return argtxt.split(',')
def getargsfromdoc(obj):
"""Get arguments from object doc"""
if obj.__doc__ is not None:
return getargsfromtext(obj.__doc__, obj.__name__)
def getargs(obj):
"""Get the names and default values of a function's arguments"""
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = get_meth_func(obj)
elif inspect.isclass(obj) and hasattr(obj, '__init__'):
func_obj = getattr(obj, '__init__')
else:
return []
if not hasattr(func_obj, 'func_code'):
# Builtin: try to extract info from doc
args = getargsfromdoc(func_obj)
if args is not None:
return args
else:
# Example: PyQt4
return getargsfromdoc(obj)
args, _, _ = inspect.getargs(func_obj.func_code)
if not args:
return getargsfromdoc(obj)
# Supporting tuple arguments in def statement:
for i_arg, arg in enumerate(args):
if isinstance(arg, list):
args[i_arg] = "(%s)" % ", ".join(arg)
defaults = get_func_defaults(func_obj)
if defaults is not None:
for index, default in enumerate(defaults):
args[index+len(args)-len(defaults)] += '='+repr(default)
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(args) == 1:
return None
if 'self' in args:
args.remove('self')
return args
def getargtxt(obj, one_arg_per_line=True):
"""
Get the names and default values of a function's arguments
Return list with separators (', ') formatted for calltips
"""
args = getargs(obj)
if args:
sep = ', '
textlist = None
for i_arg, arg in enumerate(args):
if textlist is None:
textlist = ['']
textlist[-1] += arg
if i_arg < len(args)-1:
textlist[-1] += sep
if len(textlist[-1]) >= 32 or one_arg_per_line:
textlist.append('')
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(textlist) == 1:
return None
if 'self'+sep in textlist:
textlist.remove('self'+sep)
return textlist
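# Illustrative example (not part of the original source): for a plain function
# the argument names are recovered from its code object, with defaults appended;
# getargtxt additionally inserts ', ' separators for calltip display.
#   >>> def f(a, b=2, c='x'): pass
#   >>> getargs(f)  # -> ['a', 'b=2', "c='x'"]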
def isdefined(obj, force_import=False, namespace=None):
"""Return True if object is defined in namespace
If namespace is None --> namespace = locals()"""
if namespace is None:
namespace = locals()
attr_list = obj.split('.')
base = attr_list.pop(0)
if len(base) == 0:
return False
if base not in builtins.__dict__ and base not in namespace:
if force_import:
try:
module = __import__(base, globals(), namespace)
if base not in globals():
globals()[base] = module
namespace[base] = module
except (ImportError, SyntaxError):
return False
else:
return False
for attr in attr_list:
try:
attr_not_found = not hasattr(eval(base, namespace), attr)
except SyntaxError:
return False
if attr_not_found:
if force_import:
try:
__import__(base+'.'+attr, globals(), namespace)
except (ImportError, SyntaxError):
return False
else:
return False
base += '.'+attr
return True
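# Illustrative example (not part of the original source): dotted names are
# resolved attribute by attribute against the given namespace.
#   >>> import math
#   >>> isdefined('math.pi', namespace={'math': math})        # -> True
#   >>> isdefined('math.nonsense', namespace={'math': math})  # -> False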
if __name__ == "__main__":
class Test(object):
def method(self, x, y=2):
pass
print(getargtxt(Test.__init__))
print(getargtxt(Test.method))
print(isdefined('numpy.take', force_import=True))
print(isdefined('__import__'))
print(isdefined('.keys', force_import=True))
print(getobj('globals'))
print(getobj('globals().keys'))
print(getobj('+scipy.signal.'))
print(getobj('4.'))
print(getdoc(sorted))
print(getargtxt(sorted))
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/utils/dochelpers.py
|
Python
|
gpl-3.0
| 11,439
|
[
"VTK"
] |
cf93b84ef9489252d6c49d4847c2fc504181eed5648460cec9e86e47109edb15
|
import math
from ecell4.reaction_reader.decorator2 import species_attributes, reaction_rules
from ecell4.reaction_reader.network import generate_reactions
@species_attributes
def attributegen():
gTetR(lac1,lac2) | 1
gCI(tet1,tet2) | 1
gLacI(cI1,cI2) | 1
# gTetR(lac1^1,lac2^2).pLacI(tet^1).pLacI(tet^2) | 1
# gCI(tet1^1,tet2^2).pTetR(cI^1).pTetR(cI^2) | 1
# gLacI(cI1^1,cI2^2).pCI(lac^1).pCI(lac^2) | 1
# mTetR | 3163
# mCI | 6819
# mLacI | 129
# pTetR(cI) | 183453
# pCI(lac) | 2006198
# pLacI(tet) | 165670
@reaction_rules
def rulegen(Na, V, c0, c1, c2, c3, c4, c5, c6, c7, tF, rF, pF):
(gTetR(lac1,lac2) + pLacI(tet) == gTetR(lac1^1,lac2).pLacI(tet^1)
| (c0 / Na / V * tF / pF, c1 * tF))
(gTetR(lac1^_,lac2) + pLacI(tet) == gTetR(lac1^_,lac2^1).pLacI(tet^1)
| (c0 / Na / V * tF / pF, c2 * tF))
gTetR(lac1,lac2) > gTetR(lac1,lac2) + mTetR | (c3 * rF)
gTetR(lac1^_) > gTetR(lac1^_) + mTetR | (c4 * rF)
mTetR > mTetR + pTetR(cI) | (c5 / rF * pF)
mTetR > ~mTetR | c6
pTetR(cI) > ~pTetR(cI) | c7
(gCI(tet1,tet2) + pTetR(cI) == gCI(tet1^1,tet2).pTetR(cI^1)
| (c0 / Na / V * tF / pF, c1 * tF))
(gCI(tet1^_,tet2) + pTetR(cI) == gCI(tet1^_,tet2^1).pTetR(cI^1)
| (c0 / Na / V * tF / pF, c2 * tF))
gCI(tet1,tet2) > gCI(tet1,tet2) + mCI | (c3 * rF)
gCI(tet1^_) > gCI(tet1^_) + mCI | (c4 * rF)
mCI > mCI + pCI(lac) | (c5 / rF * pF)
mCI > ~mCI | c6
pCI(lac) > ~pCI(lac) | c7
(gLacI(cI1,cI2) + pCI(lac) == gLacI(cI1^1,cI2).pCI(lac^1)
| (c0 / Na / V * tF / pF, c1 * tF))
(gLacI(cI1^_,cI2) + pCI(lac) == gLacI(cI1^_,cI2^1).pCI(lac^1)
| (c0 / Na / V * tF / pF, c2 * tF))
gLacI(cI1,cI2) > gLacI(cI1,cI2) + mLacI | (c3 * rF)
gLacI(cI1^_) > gLacI(cI1^_) + mLacI | (c4 * rF)
mLacI > mLacI + pLacI(tet) | (c5 / rF * pF)
mLacI > ~mLacI | c6
pLacI(tet) > ~pLacI(tet) | c7
# (gTetR(lac,lac) + pLacI(tet) == gTetR(lac^1,lac).pLacI(tet^1)
# | (c0 / Na / V * tF / pF, c1 * tF))
#
# (gTetR(lac^_,lac) + pLacI(tet) == gTetR(lac^_,lac^1).pLacI(tet^1)
# | (c0 / Na / V * tF / pF, c2 * tF))
#
# gTetR(lac,lac) > gTetR(lac,lac) + mTetR | (c3 * rF)
# gTetR(lac^_) > gTetR(lac^_) + mTetR | (c4 * rF)
# mTetR > mTetR + pTetR(cI) | (c5 / rF * pF)
# mTetR + Null > Null | c6
# pTetR(cI) + Null > Null | c7
#
# (gCI(tet,tet) + pTetR(cI) == gCI(tet^1,tet).pTetR(cI^1)
# | (c0 / Na / V * tF / pF, c1 * tF))
#
# (gCI(tet^_,tet) + pTetR(cI) == gCI(tet^_,tet^1).pTetR(cI^1)
# | (c0 / Na / V * tF / pF, c2 * tF))
#
# gCI(tet,tet) > gCI(tet,tet) + mCI | (c3 * rF)
#
# gCI(tet^_) > gCI(tet^_) + mCI | (c4 * rF)
# mCI > mCI + pCI(lac) | (c5 / rF * pF)
# mCI + Null > Null | c6
# pCI(lac) + Null > Null | c7
#
# (gLacI(cI,cI) + pCI(lac) == gLacI(cI^1,cI).pCI(lac^1)
# | (c0 / Na / V * tF / pF, c1 * tF))
#
# (gLacI(cI^_,cI) + pCI(lac) == gLacI(cI^_,cI^1).pCI(lac^1)
# | (c0 / Na / V * tF / pF, c2 * tF))
#
# gLacI(cI,cI) > gLacI(cI,cI) + mLacI | (c3 * rF)
# gLacI(cI^_) > gLacI(cI^_) + mLacI | (c4 * rF)
# mLacI > mLacI + pLacI(tet) | (c5 / rF * pF)
# mLacI + Null > Null | c6
# pLacI(tet) + Null > Null | c7
if __name__ == "__main__":
newseeds = []
for i, (sp, attr) in enumerate(attributegen()):
print i, sp, attr
newseeds.append(sp)
print ''
rules = rulegen(
6.022e23 ,1.4e-15, 1e9, 224, 9, 0.5, 5e-4, 0.167,
math.log(2) / 120, math.log(2) / 600, 1e-4, 1000, 1000)
for i, rr in enumerate(rules):
print i, rr
print ''
generate_reactions(newseeds, rules)
#begin model
#begin parameters
# Na 6.022e23 # Avogadro's [mol^-1]
# V 1.4e-15 # Cell volume [L]
# #
# c0 1e9 # M^-1 s^-1
# c1 224 # s^-1
# c2 9 # s^-1
# c3 0.5 # s^-1
# c4 5e-4 # s^-1
# c5 0.167 # s^-1
# c6 ln(2)/120 # s^-1
# c7 ln(2)/600 # s^-1
# #
# tF 1e-4 # telegraph factor
# rF 1000 # rna factor
# pF 1000 # protein factor
#end parameters
#
#begin molecule types
# Null()
# gTetR(lac,lac)
# gCI(tet,tet)
# gLacI(cI,cI)
# mTetR()
# mCI()
# mLacI()
# pTetR(cI)
# pCI(lac)
# pLacI(tet)
#end molecule types
#
#begin seed species
# Null() 1
# gTetR(lac!1,lac!2).pLacI(tet!1).pLacI(tet!2) 1
# gCI(tet!1,tet!2).pTetR(cI!1).pTetR(cI!2) 1
# gLacI(cI!1,cI!2).pCI(lac!1).pCI(lac!2) 1
# mTetR() 3163
# mCI() 6819
# mLacI() 129
# pTetR(cI) 183453
# pCI(lac) 2006198
# pLacI(tet) 165670
#end seed species
#
#begin observables
# Molecules pTetR pTetR(cI)
# Molecules pCI pCI(lac)
# Molecules pLacI pLacI(tet)
# Molecules NULL Null()
#end observables
#
#begin reaction rules
# gTetR(lac,lac) + pLacI(tet) <-> gTetR(lac!1,lac).pLacI(tet!1) c0/Na/V*tF/pF, c1*tF
# gTetR(lac!+,lac) + pLacI(tet) <-> gTetR(lac!+,lac!1).pLacI(tet!1) c0/Na/V*tF/pF, c2*tF
# gTetR(lac,lac) -> gTetR(lac,lac) + mTetR() c3*rF
# gTetR(lac!+) -> gTetR(lac!+) + mTetR() c4*rF
# mTetR() -> mTetR() + pTetR(cI) c5/rF*pF
# mTetR() + Null() -> Null() c6
# pTetR(cI) + Null() -> Null() c7
# #
# gCI(tet,tet) + pTetR(cI) <-> gCI(tet!1,tet).pTetR(cI!1) c0/Na/V*tF/pF, c1*tF
# gCI(tet!+,tet) + pTetR(cI) <-> gCI(tet!+,tet!1).pTetR(cI!1) c0/Na/V*tF/pF, c2*tF
# gCI(tet,tet) -> gCI(tet,tet) + mCI() c3*rF
# gCI(tet!+) -> gCI(tet!+) + mCI() c4*rF
# mCI() -> mCI() + pCI(lac) c5/rF*pF
# mCI() + Null() -> Null() c6
# pCI(lac) + Null() -> Null() c7
# #
# gLacI(cI,cI) + pCI(lac) <-> gLacI(cI!1,cI).pCI(lac!1) c0/Na/V*tF/pF, c1*tF
# gLacI(cI!+,cI) + pCI(lac) <-> gLacI(cI!+,cI!1).pCI(lac!1) c0/Na/V*tF/pF, c2*tF
# gLacI(cI,cI) -> gLacI(cI,cI) + mLacI() c3*rF
# gLacI(cI!+) -> gLacI(cI!+) + mLacI() c4*rF
# mLacI() -> mLacI() + pLacI(tet) c5/rF*pF
# mLacI() + Null() -> Null() c6
# pLacI(tet) + Null() -> Null() c7
#end reaction rules
#end model
#
#generate_network({overwrite=>1})
#simulate({method=>"ode",t_end=>4e4,n_steps=>4e2,verbose=>1,atol=>1e-12,rtol=>1e-12})
##simulate({method=>"pla",t_end=>4e4,n_steps=>4e2,verbose=>1,pla_config=>"fEuler|sb|pre:post|eps=0.03"})
|
navoj/ecell4
|
python/samples/reaction_reader/Repressilator/Repressilator.py
|
Python
|
gpl-2.0
| 7,171
|
[
"Avogadro"
] |
e08746a0b56b19cbaa7959e0575d4fcd78cd92df2143b46a3d4310c0d4a5389c
|
########################################################################
# File : SiteDirector.py
# Author : A.T.
########################################################################
""" The Site Director is a simple agent performing pilot job submission to particular sites.
"""
import os
import base64
import bz2
import tempfile
import random
import socket
import hashlib
import re
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals, Registry, Operations, Resources
from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory
from DIRAC.WorkloadManagementSystem.Client.ServerUtils import pilotAgentsDB, jobDB
from DIRAC.WorkloadManagementSystem.Service.WMSUtilities import getGridEnv
from DIRAC.WorkloadManagementSystem.private.ConfigHelper import findGenericPilotCredentials
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.AccountingSystem.Client.Types.Pilot import Pilot as PilotAccounting
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Security import CS
from DIRAC.Core.Utilities.SiteCEMapping import getSiteForCE
from DIRAC.Core.Utilities.Time import dateTime, second
from DIRAC.Core.Utilities.List import fromChar
from collections import defaultdict
__RCSID__ = "eb8b572 (2015-10-30 12:17:53 +0100) Andrei Tsaregorodtsev <atsareg@in2p3.fr>"
DIRAC_PILOT = os.path.join( DIRAC.rootPath, 'DIRAC', 'WorkloadManagementSystem', 'PilotAgent', 'dirac-pilot.py' )
DIRAC_INSTALL = os.path.join( DIRAC.rootPath, 'DIRAC', 'Core', 'scripts', 'dirac-install.py' )
DIRAC_MODULES = [ os.path.join( DIRAC.rootPath, 'DIRAC', 'WorkloadManagementSystem', 'PilotAgent', 'pilotCommands.py' ),
os.path.join( DIRAC.rootPath, 'DIRAC', 'WorkloadManagementSystem', 'PilotAgent', 'pilotTools.py' ) ]
TRANSIENT_PILOT_STATUS = ['Submitted', 'Waiting', 'Running', 'Scheduled', 'Ready', 'Unknown']
WAITING_PILOT_STATUS = ['Submitted', 'Waiting', 'Scheduled', 'Ready']
FINAL_PILOT_STATUS = ['Aborted', 'Failed', 'Done']
MAX_PILOTS_TO_SUBMIT = 100
MAX_JOBS_IN_FILLMODE = 5
class SiteDirector( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
def __init__( self, *args, **kwargs ):
""" c'tor
"""
AgentModule.__init__( self, *args, **kwargs )
self.queueDict = {}
self.queueCECache = {}
self.queueSlots = {}
self.failedQueues = defaultdict( int )
self.firstPass = True
self.maxJobsInFillMode = MAX_JOBS_IN_FILLMODE
self.maxPilotsToSubmit = MAX_PILOTS_TO_SUBMIT
def initialize( self ):
""" Standard constructor
"""
self.am_setOption( "PollingTime", 60.0 )
self.am_setOption( "maxPilotWaitingHours", 6 )
return S_OK()
def beginExecution( self ):
self.gridEnv = self.am_getOption( "GridEnv", getGridEnv() )
# The SiteDirector is for a particular user community
self.vo = self.am_getOption( "VO", '' )
# The SiteDirector is for a particular submitPool
self.submitPool = self.am_getOption( "SubmitPool", '' )
if not self.vo:
self.vo = self.am_getOption( "Community", '' )
if not self.vo:
self.vo = CSGlobals.getVO()
# The SiteDirector is for a particular user group
self.group = self.am_getOption( "Group", '' )
    # self.voGroups contains all the eligible user groups for pilots submitted by this SiteDirector
self.voGroups = []
    # Choose the group for which pilots will be submitted. This is a hack until
    # we are able to match pilots to VOs.
if not self.group:
if self.vo:
result = Registry.getGroupsForVO( self.vo )
if not result['OK']:
return result
for group in result['Value']:
if 'NormalUser' in Registry.getPropertiesForGroup( group ):
self.voGroups.append( group )
else:
self.voGroups = [ self.group ]
result = findGenericPilotCredentials( vo = self.vo )
if not result[ 'OK' ]:
return result
self.pilotDN, self.pilotGroup = result[ 'Value' ]
self.pilotDN = self.am_getOption( "PilotDN", self.pilotDN )
self.pilotGroup = self.am_getOption( "PilotGroup", self.pilotGroup )
self.platforms = []
self.sites = []
self.listSubmitPools = []
# submitPools of the VO, and if not defined then the one of the group
defaultSubmitPools = ''
if self.group:
defaultSubmitPools = Registry.getGroupOption( self.group, 'SubmitPools', '' )
elif self.vo:
defaultSubmitPools = Registry.getVOOption( self.vo, 'SubmitPools', '' )
if defaultSubmitPools.find(',')==-1 :
self.listSubmitPools.append(defaultSubmitPools)
else:
self.listSubmitPools = defaultSubmitPools.split(',')
self.pilot = self.am_getOption( 'PilotScript', DIRAC_PILOT )
self.install = DIRAC_INSTALL
self.extraModules = self.am_getOption( 'ExtraPilotModules', [] ) + DIRAC_MODULES
self.workingDirectory = self.am_getOption( 'WorkDirectory' )
self.maxQueueLength = self.am_getOption( 'MaxQueueLength', 86400 * 3 )
self.pilotLogLevel = self.am_getOption( 'PilotLogLevel', 'INFO' )
self.maxJobsInFillMode = self.am_getOption( 'MaxJobsInFillMode', self.maxJobsInFillMode )
self.maxPilotsToSubmit = self.am_getOption( 'MaxPilotsToSubmit', self.maxPilotsToSubmit )
self.pilotWaitingFlag = self.am_getOption( 'PilotWaitingFlag', True )
self.pilotWaitingTime = self.am_getOption( 'MaxPilotWaitingTime', 3600 )
self.failedQueueCycleFactor = self.am_getOption( 'FailedQueueCycleFactor', 10 )
self.pilotStatusUpdateCycleFactor = self.am_getOption( 'PilotStatusUpdateCycleFactor', 10 )
# Flags
self.updateStatus = self.am_getOption( 'UpdatePilotStatus', True )
self.getOutput = self.am_getOption( 'GetPilotOutput', True )
self.sendAccounting = self.am_getOption( 'SendPilotAccounting', True )
# Get the site description dictionary
siteNames = None
if not self.am_getOption( 'Site', 'Any' ).lower() == "any":
siteNames = self.am_getOption( 'Site', [] )
if not siteNames:
siteNames = None
ceTypes = None
if not self.am_getOption( 'CETypes', 'Any' ).lower() == "any":
ceTypes = self.am_getOption( 'CETypes', [] )
ces = None
if not self.am_getOption( 'CEs', 'Any' ).lower() == "any":
ces = self.am_getOption( 'CEs', [] )
if not ces:
ces = None
result = Resources.getQueues( community = self.vo,
siteList = siteNames,
ceList = ces,
ceTypeList = ceTypes,
mode = 'Direct' )
if not result['OK']:
return result
resourceDict = result['Value']
result = self.getQueues( resourceDict )
if not result['OK']:
return result
#if not siteNames:
# siteName = gConfig.getValue( '/DIRAC/Site', 'Unknown' )
# if siteName == 'Unknown':
# return S_OK( 'No site specified for the SiteDirector' )
# else:
# siteNames = [siteName]
#self.siteNames = siteNames
if self.updateStatus:
self.log.always( 'Pilot status update requested' )
if self.getOutput:
self.log.always( 'Pilot output retrieval requested' )
if self.sendAccounting:
self.log.always( 'Pilot accounting sending requested' )
self.log.always( 'Sites:', siteNames )
self.log.always( 'CETypes:', ceTypes )
self.log.always( 'CEs:', ces )
self.log.always( 'PilotDN:', self.pilotDN )
self.log.always( 'PilotGroup:', self.pilotGroup )
self.log.always( 'MaxPilotsToSubmit:', self.maxPilotsToSubmit )
self.log.always( 'MaxJobsInFillMode:', self.maxJobsInFillMode )
self.localhost = socket.getfqdn()
self.proxy = ''
if self.firstPass:
if self.queueDict:
self.log.always( "Agent will serve queues:" )
for queue in self.queueDict:
self.log.always( "Site: %s, CE: %s, Queue: %s" % ( self.queueDict[queue]['Site'],
self.queueDict[queue]['CEName'],
queue ) )
self.firstPass = False
return S_OK()
def __generateQueueHash( self, queueDict ):
""" Generate a hash of the queue description
"""
myMD5 = hashlib.md5()
myMD5.update( str( queueDict ) )
hexstring = myMD5.hexdigest()
return hexstring
def getQueues( self, resourceDict ):
""" Get the list of relevant CEs and their descriptions
"""
self.queueDict = {}
ceFactory = ComputingElementFactory()
for site in resourceDict:
for ce in resourceDict[site]:
ceDict = resourceDict[site][ce]
ceTags = ceDict.get( 'Tag', [] )
if isinstance( ceTags, basestring ):
ceTags = fromChar( ceTags )
ceMaxRAM = ceDict.get( 'MaxRAM', None )
qDict = ceDict.pop( 'Queues' )
for queue in qDict:
queueName = '%s_%s' % ( ce, queue )
self.queueDict[queueName] = {}
self.queueDict[queueName]['ParametersDict'] = qDict[queue]
self.queueDict[queueName]['ParametersDict']['Queue'] = queue
self.queueDict[queueName]['ParametersDict']['Site'] = site
self.queueDict[queueName]['ParametersDict']['GridEnv'] = self.gridEnv
self.queueDict[queueName]['ParametersDict']['Setup'] = gConfig.getValue( '/DIRAC/Setup', 'unknown' )
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in self.queueDict[queueName]['ParametersDict'] and \
"SI00" in self.queueDict[queueName]['ParametersDict']:
maxCPUTime = float( self.queueDict[queueName]['ParametersDict']['maxCPUTime'] )
# For some sites there are crazy values in the CS
maxCPUTime = max( maxCPUTime, 0 )
maxCPUTime = min( maxCPUTime, 86400 * 12.5 )
si00 = float( self.queueDict[queueName]['ParametersDict']['SI00'] )
queueCPUTime = 60. / 250. * maxCPUTime * si00
self.queueDict[queueName]['ParametersDict']['CPUTime'] = int( queueCPUTime )
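            # Worked example (illustrative numbers, not taken from any real CS):
            # with maxCPUTime = 2880 (minutes) and SI00 = 2500, the Glue
            # convention above gives
            #   queueCPUTime = 60. / 250. * 2880 * 2500 = 1728000 normalised seconds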
queueTags = self.queueDict[queueName]['ParametersDict'].get( 'Tag' )
if queueTags and isinstance( queueTags, basestring ):
queueTags = fromChar( queueTags )
self.queueDict[queueName]['ParametersDict']['Tag'] = queueTags
if ceTags:
if queueTags:
allTags = list( set( ceTags + queueTags ) )
self.queueDict[queueName]['ParametersDict']['Tag'] = allTags
else:
self.queueDict[queueName]['ParametersDict']['Tag'] = ceTags
maxRAM = self.queueDict[queueName]['ParametersDict'].get( 'MaxRAM' )
maxRAM = ceMaxRAM if not maxRAM else maxRAM
if maxRAM:
self.queueDict[queueName]['ParametersDict']['MaxRAM'] = maxRAM
qwDir = os.path.join( self.workingDirectory, queue )
if not os.path.exists( qwDir ):
os.makedirs( qwDir )
self.queueDict[queueName]['ParametersDict']['WorkingDirectory'] = qwDir
platform = ''
if "Platform" in self.queueDict[queueName]['ParametersDict']:
platform = self.queueDict[queueName]['ParametersDict']['Platform']
elif "Platform" in ceDict:
platform = ceDict['Platform']
elif "OS" in ceDict:
architecture = ceDict.get( 'architecture', 'x86_64' )
OS = ceDict['OS']
platform = '_'.join( [architecture, OS] )
if platform and not platform in self.platforms:
self.platforms.append( platform )
if not "Platform" in self.queueDict[queueName]['ParametersDict'] and platform:
result = Resources.getDIRACPlatform( platform )
if result['OK']:
self.queueDict[queueName]['ParametersDict']['Platform'] = result['Value'][0]
ceQueueDict = dict( ceDict )
ceQueueDict.update( self.queueDict[queueName]['ParametersDict'] )
# Generate the CE object for the queue or pick the already existing one
# if the queue definition did not change
queueHash = self.__generateQueueHash( ceQueueDict )
if queueName in self.queueCECache and self.queueCECache[queueName]['Hash'] == queueHash:
queueCE = self.queueCECache[queueName]['CE']
else:
result = ceFactory.getCE( ceName = ce,
ceType = ceDict['CEType'],
ceParametersDict = ceQueueDict )
if not result['OK']:
return result
self.queueCECache.setdefault( queueName, {} )
self.queueCECache[queueName]['Hash'] = queueHash
self.queueCECache[queueName]['CE'] = result['Value']
queueCE = self.queueCECache[queueName]['CE']
self.queueDict[queueName]['CE'] = queueCE
self.queueDict[queueName]['CEName'] = ce
self.queueDict[queueName]['CEType'] = ceDict['CEType']
self.queueDict[queueName]['Site'] = site
self.queueDict[queueName]['QueueName'] = queue
self.queueDict[queueName]['Platform'] = platform
result = self.queueDict[queueName]['CE'].isValid()
if not result['OK']:
self.log.fatal( result['Message'] )
return result
if 'BundleProxy' in self.queueDict[queueName]['ParametersDict']:
if self.queueDict[queueName]['ParametersDict']['BundleProxy'].lower() in ['true','yes','1']:
self.queueDict[queueName]['BundleProxy'] = True
elif 'BundleProxy' in ceDict:
if ceDict['BundleProxy'].lower() in ['true','yes','1']:
self.queueDict[queueName]['BundleProxy'] = True
if site not in self.sites:
self.sites.append( site )
return S_OK()
def execute( self ):
""" Main execution method
"""
if not self.queueDict:
self.log.warn( 'No site defined, exiting the cycle' )
return S_OK()
result = self.submitJobs()
if not result['OK']:
self.log.error( 'Errors in the job submission: ', result['Message'] )
cyclesDone = self.am_getModuleParam( 'cyclesDone' )
if self.updateStatus and cyclesDone % self.pilotStatusUpdateCycleFactor == 0:
result = self.updatePilotStatus()
if not result['OK']:
self.log.error( 'Errors in updating pilot status: ', result['Message'] )
return S_OK()
def submitJobs( self ):
""" Go through defined computing elements and submit jobs if necessary
"""
# Check that there is some work at all
setup = CSGlobals.getSetup()
    # because tq_TQToSubmitPools has a Value field which is SubmitPool, not SubmitPools,
    # the compromise solution in the current SiteDirector design is to have a new SubmitPool (without the "s", since it is a SiteDirector),
    # then to check whether such a SiteDirector SubmitPool is in the VO's self.listSubmitPools; if not, inform and continue,
    # if yes, then match only this SubmitPool:
if not self.submitPool:
      # this is for backward setup compatibility: use just the first of the VO SubmitPools
tqDict = { 'Setup':setup,
'CPUTime': 9999999,
'SubmitPool' : self.listSubmitPools[0] }
else:
if self.submitPool in self.listSubmitPools:
tqDict = { 'Setup':setup,
'CPUTime': 9999999,
'SubmitPool' : self.submitPool }
else:
self.log.info( 'SiteDirector SubmitPool:%s not defined in per VO or group enabled SubmitPools' % ( self.submitPool ) )
return S_OK()
if self.vo:
tqDict['Community'] = self.vo
if self.voGroups:
tqDict['OwnerGroup'] = self.voGroups
result = Resources.getCompatiblePlatforms( self.platforms )
if not result['OK']:
return result
tqDict['Platform'] = result['Value']
tqDict['Site'] = self.sites
tqDict['Tag'] = []
self.log.verbose( 'Checking overall TQ availability with requirements' )
self.log.verbose( tqDict )
rpcMatcher = RPCClient( "WorkloadManagement/Matcher" )
result = rpcMatcher.getMatchingTaskQueues( tqDict )
if not result[ 'OK' ]:
return result
if not result['Value']:
self.log.verbose( 'No Waiting jobs suitable for the director' )
return S_OK()
jobSites = set()
anySite = False
testSites = set()
totalWaitingJobs = 0
for tqID in result['Value']:
if "Sites" in result['Value'][tqID]:
for site in result['Value'][tqID]['Sites']:
if site.lower() != 'any':
jobSites.add( site )
else:
anySite = True
else:
anySite = True
if "JobTypes" in result['Value'][tqID]:
if "Sites" in result['Value'][tqID]:
for site in result['Value'][tqID]['Sites']:
if site.lower() != 'any':
testSites.add( site )
totalWaitingJobs += result['Value'][tqID]['Jobs']
tqIDList = result['Value'].keys()
result = pilotAgentsDB.countPilots( { 'TaskQueueID': tqIDList,
'Status': WAITING_PILOT_STATUS },
None )
totalWaitingPilots = 0
if result['OK']:
totalWaitingPilots = result['Value']
self.log.info( 'Total %d jobs in %d task queues with %d waiting pilots' % (totalWaitingJobs, len( tqIDList ), totalWaitingPilots ) )
#if totalWaitingPilots >= totalWaitingJobs:
# self.log.info( 'No more pilots to be submitted in this cycle' )
# return S_OK()
# Check if the site is allowed in the mask
result = jobDB.getSiteMask()
if not result['OK']:
return S_ERROR( 'Can not get the site mask' )
siteMaskList = result['Value']
queues = self.queueDict.keys()
random.shuffle( queues )
totalSubmittedPilots = 0
matchedQueues = 0
for queue in queues:
# Check if the queue failed previously
failedCount = self.failedQueues[ queue ] % self.failedQueueCycleFactor
if failedCount != 0:
self.log.warn( "%s queue failed recently, skipping %d cycles" % ( queue, 10-failedCount ) )
self.failedQueues[queue] += 1
continue
ce = self.queueDict[queue]['CE']
ceName = self.queueDict[queue]['CEName']
ceType = self.queueDict[queue]['CEType']
queueName = self.queueDict[queue]['QueueName']
siteName = self.queueDict[queue]['Site']
platform = self.queueDict[queue]['Platform']
siteMask = siteName in siteMaskList
if not anySite and siteName not in jobSites:
self.log.verbose( "Skipping queue %s at %s: no workload expected" % (queueName, siteName) )
continue
if not siteMask and siteName not in testSites:
self.log.verbose( "Skipping queue %s at site %s not in the mask" % (queueName, siteName) )
continue
if 'CPUTime' in self.queueDict[queue]['ParametersDict'] :
queueCPUTime = int( self.queueDict[queue]['ParametersDict']['CPUTime'] )
else:
self.log.warn( 'CPU time limit is not specified for queue %s, skipping...' % queue )
continue
if queueCPUTime > self.maxQueueLength:
queueCPUTime = self.maxQueueLength
# Prepare the queue description to look for eligible jobs
ceDict = ce.getParameterDict()
ceDict[ 'GridCE' ] = ceName
#if not siteMask and 'Site' in ceDict:
# self.log.info( 'Site not in the mask %s' % siteName )
# self.log.info( 'Removing "Site" from matching Dict' )
# del ceDict[ 'Site' ]
if not siteMask:
ceDict['JobType'] = "Test"
if self.vo:
ceDict['Community'] = self.vo
if self.voGroups:
ceDict['OwnerGroup'] = self.voGroups
# This is a hack to get rid of !
# ceDict['SubmitPool'] = self.defaultSubmitPools
      # this lazy hack breaks the matcher design: the matcher is not able to match multiple values in SubmitPool,
      # because tq_TQToSubmitPools has a Value field which is SubmitPool, not SubmitPools.
      # We cannot simply process any SubmitPools either; only those of the VO, and also of the SiteDirector.
      # The compromise solution in the current SiteDirector design is to have a new SubmitPool (without the "s", since it is a SiteDirector),
      # then to check whether such a SiteDirector SubmitPool is in the VO's self.listSubmitPools; if not, inform and continue,
      # if yes, then match only this SubmitPool
#
if not self.submitPool:
        # this is for backward setup compatibility: use just the first of the VO SubmitPools
ceDict['SubmitPool'] = self.listSubmitPools[0]
else:
        # we don't check whether the SiteDirector SubmitPool is in the group or VO SubmitPools, because that check has been done at the beginning of this function
ceDict['SubmitPool'] = self.submitPool
result = Resources.getCompatiblePlatforms( platform )
if not result['OK']:
continue
ceDict['Platform'] = result['Value']
# Get the number of eligible jobs for the target site/queue
result = rpcMatcher.getMatchingTaskQueues( ceDict )
if not result['OK']:
self.log.error( 'Could not retrieve TaskQueues from TaskQueueDB', result['Message'] )
return result
taskQueueDict = result['Value']
if not taskQueueDict:
self.log.verbose( 'No matching TQs found for %s' % queue )
continue
matchedQueues += 1
totalTQJobs = 0
tqIDList = taskQueueDict.keys()
for tq in taskQueueDict:
totalTQJobs += taskQueueDict[tq]['Jobs']
self.log.verbose( '%d job(s) from %d task queue(s) are eligible for %s queue' % (totalTQJobs, len( tqIDList ), queue) )
# Get the number of already waiting pilots for these task queues
totalWaitingPilots = 0
if self.pilotWaitingFlag:
lastUpdateTime = dateTime() - self.pilotWaitingTime * second
result = pilotAgentsDB.countPilots( { 'TaskQueueID': tqIDList,
'Status': WAITING_PILOT_STATUS },
None, lastUpdateTime )
if not result['OK']:
self.log.error( 'Failed to get Number of Waiting pilots', result['Message'] )
totalWaitingPilots = 0
else:
totalWaitingPilots = result['Value']
self.log.verbose( 'Waiting Pilots for TaskQueue %s:' % tqIDList, totalWaitingPilots )
if totalWaitingPilots >= totalTQJobs:
self.log.verbose( "%d waiting pilots already for all the available jobs" % totalWaitingPilots )
continue
self.log.verbose( "%d waiting pilots for the total of %d eligible jobs for %s" % (totalWaitingPilots, totalTQJobs, queue) )
# Get the working proxy
cpuTime = queueCPUTime + 86400
self.log.verbose( "Getting pilot proxy for %s/%s %d long" % ( self.pilotDN, self.pilotGroup, cpuTime ) )
result = gProxyManager.getPilotProxyFromDIRACGroup( self.pilotDN, self.pilotGroup, cpuTime )
if not result['OK']:
return result
self.proxy = result['Value']
ce.setProxy( self.proxy, cpuTime - 60 )
# Get the number of available slots on the target site/queue
totalSlots = self.__getQueueSlots( queue )
if totalSlots == 0:
self.log.debug( '%s: No slots available' % queue )
continue
pilotsToSubmit = max( 0, min( totalSlots, totalTQJobs - totalWaitingPilots ) )
self.log.info( '%s: Slots=%d, TQ jobs=%d, Pilots: waiting %d, to submit=%d' % \
( queue, totalSlots, totalTQJobs, totalWaitingPilots, pilotsToSubmit ) )
# Limit the number of pilots to submit to MAX_PILOTS_TO_SUBMIT
pilotsToSubmit = min( self.maxPilotsToSubmit, pilotsToSubmit )
while pilotsToSubmit > 0:
self.log.info( 'Going to submit %d pilots to %s queue' % ( pilotsToSubmit, queue ) )
bundleProxy = self.queueDict[queue].get( 'BundleProxy', False )
jobExecDir = ''
jobExecDir = self.queueDict[queue]['ParametersDict'].get( 'JobExecDir', jobExecDir )
httpProxy = self.queueDict[queue]['ParametersDict'].get( 'HttpProxy', '' )
result = self.__getExecutable( queue, pilotsToSubmit, bundleProxy, httpProxy, jobExecDir )
if not result['OK']:
return result
executable, pilotSubmissionChunk = result['Value']
result = ce.submitJob( executable, '', pilotSubmissionChunk )
### FIXME: The condor thing only transfers the file with some
### delay, so when we unlink here the script is gone
### FIXME 2: but at some time we need to clean up the pilot wrapper scripts...
if ceType != 'HTCondorCE':
os.unlink( executable )
if not result['OK']:
self.log.error( 'Failed submission to queue %s:\n' % queue, result['Message'] )
pilotsToSubmit = 0
self.failedQueues[queue] += 1
continue
pilotsToSubmit = pilotsToSubmit - pilotSubmissionChunk
# Add pilots to the PilotAgentsDB assign pilots to TaskQueue proportionally to the
# task queue priorities
pilotList = result['Value']
self.queueSlots[queue]['AvailableSlots'] -= len( pilotList )
totalSubmittedPilots += len( pilotList )
self.log.info( 'Submitted %d pilots to %s@%s' % ( len( pilotList ), queueName, ceName ) )
stampDict = {}
if result.has_key( 'PilotStampDict' ):
stampDict = result['PilotStampDict']
tqPriorityList = []
sumPriority = 0.
for tq in taskQueueDict:
sumPriority += taskQueueDict[tq]['Priority']
tqPriorityList.append( ( tq, sumPriority ) )
rndm = random.random()*sumPriority
tqDict = {}
for pilotID in pilotList:
rndm = random.random() * sumPriority
for tq, prio in tqPriorityList:
if rndm < prio:
tqID = tq
break
if not tqDict.has_key( tqID ):
tqDict[tqID] = []
tqDict[tqID].append( pilotID )
for tqID, pilotList in tqDict.items():
result = pilotAgentsDB.addPilotTQReference( pilotList,
tqID,
self.pilotDN,
self.pilotGroup,
self.localhost,
ceType,
'',
stampDict )
if not result['OK']:
self.log.error( 'Failed add pilots to the PilotAgentsDB: ', result['Message'] )
continue
for pilot in pilotList:
result = pilotAgentsDB.setPilotStatus( pilot, 'Submitted', ceName,
'Successfully submitted by the SiteDirector',
siteName, queueName )
if not result['OK']:
self.log.error( 'Failed to set pilot status: ', result['Message'] )
continue
self.log.info( "%d pilots submitted in total in this cycle, %d matched queues" % ( totalSubmittedPilots, matchedQueues ) )
return S_OK()
def __getQueueSlots( self, queue ):
""" Get the number of available slots in the queue
"""
ce = self.queueDict[queue]['CE']
ceName = self.queueDict[queue]['CEName']
queueName = self.queueDict[queue]['QueueName']
self.queueSlots.setdefault( queue, {} )
totalSlots = self.queueSlots[queue].get( 'AvailableSlots', 0 )
availableSlotsCount = self.queueSlots[queue].setdefault( 'AvailableSlotsCount', 0 )
if totalSlots == 0:
if availableSlotsCount % 10 == 0:
# Get the list of already existing pilots for this queue
jobIDList = None
result = pilotAgentsDB.selectPilots( {'DestinationSite':ceName,
'Queue':queueName,
'Status': TRANSIENT_PILOT_STATUS } )
if result['OK']:
jobIDList = result['Value']
result = ce.available( jobIDList )
print "AT >>> ce.available", result
if not result['OK']:
self.log.warn( 'Failed to check the availability of queue %s: \n%s' % ( queue, result['Message'] ) )
self.failedQueues[queue] += 1
else:
ceInfoDict = result['CEInfoDict']
self.log.info( "CE queue report(%s_%s): Wait=%d, Run=%d, Submitted=%d, Max=%d" % \
( ceName, queueName, ceInfoDict['WaitingJobs'], ceInfoDict['RunningJobs'],
ceInfoDict['SubmittedJobs'], ceInfoDict['MaxTotalJobs'] ) )
totalSlots = result['Value']
self.queueSlots[queue]['AvailableSlots'] = totalSlots
self.queueSlots[queue]['AvailableSlotsCount'] += 1
return totalSlots
#####################################################################################
def __getExecutable( self, queue, pilotsToSubmit, bundleProxy = True, httpProxy = '', jobExecDir = '' ):
""" Prepare the full executable for queue
"""
proxy = None
if bundleProxy:
proxy = self.proxy
pilotOptions, pilotsToSubmit = self._getPilotOptions( queue, pilotsToSubmit )
if pilotOptions is None:
self.log.error( "Pilot options empty, error in compilation" )
return S_ERROR( "Errors in compiling pilot options" )
self.log.verbose( 'pilotOptions: ', ' '.join( pilotOptions ) )
executable = self._writePilotScript( self.workingDirectory, pilotOptions, proxy, httpProxy, jobExecDir )
return S_OK( [ executable, pilotsToSubmit ] )
#####################################################################################
def _getPilotOptions( self, queue, pilotsToSubmit ):
""" Prepare pilot options
"""
queueDict = self.queueDict[queue]['ParametersDict']
pilotOptions = []
setup = gConfig.getValue( "/DIRAC/Setup", "unknown" )
if setup == 'unknown':
self.log.error( 'Setup is not defined in the configuration' )
return [ None, None ]
pilotOptions.append( '-S %s' % setup )
opsHelper = Operations.Operations( group = self.pilotGroup, setup = setup )
#Installation defined?
installationName = opsHelper.getValue( "Pilot/Installation", "" )
if installationName:
pilotOptions.append( '-V %s' % installationName )
#Project defined?
projectName = opsHelper.getValue( "Pilot/Project", "" )
if projectName:
pilotOptions.append( '-l %s' % projectName )
else:
self.log.info( 'DIRAC project will be installed by pilots' )
#Request a release
diracVersion = opsHelper.getValue( "Pilot/Version", [] )
if not diracVersion:
self.log.error( 'Pilot/Version is not defined in the configuration' )
return [ None, None ]
# diracVersion is a list of accepted releases
pilotOptions.append( '-r %s' % ','.join( str( it ) for it in diracVersion ) )
ownerDN = self.pilotDN
ownerGroup = self.pilotGroup
# Request token for maximum pilot efficiency
result = gProxyManager.requestToken( ownerDN, ownerGroup, pilotsToSubmit * self.maxJobsInFillMode )
if not result[ 'OK' ]:
self.log.error( 'Invalid proxy token request', result['Message'] )
return [ None, None ]
( token, numberOfUses ) = result[ 'Value' ]
pilotOptions.append( '-o /Security/ProxyToken=%s' % token )
# Use Filling mode
pilotOptions.append( '-M %s' % min( numberOfUses, self.maxJobsInFillMode ) )
# Since each pilot will execute min( numberOfUses, self.maxJobsInFillMode )
# with numberOfUses tokens we can submit at most:
# numberOfUses / min( numberOfUses, self.maxJobsInFillMode )
# pilots
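# Worked example (hypothetical numbers, not from the original code): with
# numberOfUses = 50 tokens and maxJobsInFillMode = 5, each pilot consumes
# 5 uses, so at most 50 / 5 = 10 pilots can be submitted with this token.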
newPilotsToSubmit = numberOfUses / min( numberOfUses, self.maxJobsInFillMode )
if newPilotsToSubmit != pilotsToSubmit:
self.log.info( 'Number of pilots to submit is changed to %d after getting the proxy token' % newPilotsToSubmit )
pilotsToSubmit = newPilotsToSubmit
# Debug
if self.pilotLogLevel.lower() == 'debug':
pilotOptions.append( '-d' )
# CS Servers
csServers = gConfig.getValue( "/DIRAC/Configuration/Servers", [] )
pilotOptions.append( '-C %s' % ",".join( csServers ) )
# DIRAC Extensions to be used in pilots
pilotExtensionsList = opsHelper.getValue( "Pilot/Extensions", [] )
extensionsList = []
if pilotExtensionsList:
if pilotExtensionsList[0] != 'None':
extensionsList = pilotExtensionsList
else:
extensionsList = CSGlobals.getCSExtensions()
if extensionsList:
pilotOptions.append( '-e %s' % ",".join( extensionsList ) )
# Requested CPU time
pilotOptions.append( '-T %s' % queueDict['CPUTime'] )
# CEName
pilotOptions.append( '-N %s' % self.queueDict[queue]['CEName'] )
# Queue
pilotOptions.append( '-Q %s' % self.queueDict[queue]['QueueName'] )
# SiteName
pilotOptions.append( '-n %s' % queueDict['Site'] )
if "ExtraPilotOptions" in queueDict:
pilotOptions.append( queueDict['ExtraPilotOptions'] )
# Hack
#if self.listSubmitPools:
#no more lazy hack
# if listSubmitPools is empty this point is never reached because of the check at the start of the function
if self.submitPool:
pilotOptions.append( '-o /Resources/Computing/CEDefaults/SubmitPool=%s' % self.submitPool )
else:
if self.listSubmitPools[0]:
pilotOptions.append( '-o /Resources/Computing/CEDefaults/SubmitPool=%s' % self.listSubmitPools[0] )
if self.group:
pilotOptions.append( '-G %s' % self.group )
return [ pilotOptions, pilotsToSubmit ]
#####################################################################################
def _writePilotScript( self, workingDirectory, pilotOptions, proxy = None,
httpProxy = '', pilotExecDir = '' ):
""" Bundle together and write out the pilot executable script, admix the proxy if given
"""
try:
compressedAndEncodedProxy = ''
proxyFlag = 'False'
if proxy is not None:
compressedAndEncodedProxy = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) )
proxyFlag = 'True'
compressedAndEncodedPilot = base64.encodestring( bz2.compress( open( self.pilot, "rb" ).read(), 9 ) )
compressedAndEncodedInstall = base64.encodestring( bz2.compress( open( self.install, "rb" ).read(), 9 ) )
compressedAndEncodedExtra = {}
for module in self.extraModules:
moduleName = os.path.basename( module )
compressedAndEncodedExtra[moduleName] = base64.encodestring( bz2.compress( open( module, "rb" ).read(), 9 ) )
except:
self.log.exception( 'Exception during file compression of proxy, dirac-pilot or dirac-install' )
return S_ERROR( 'Exception during file compression of proxy, dirac-pilot or dirac-install' )
# Extra modules
mStringList = []
for moduleName in compressedAndEncodedExtra:
mString = """open( '%s', "w" ).write(bz2.decompress( base64.decodestring( \"\"\"%s\"\"\" ) ) )""" % \
( moduleName, compressedAndEncodedExtra[moduleName] )
mStringList.append( mString )
extraModuleString = '\n '.join( mStringList )
localPilot = """#!/bin/bash
/usr/bin/env python << EOF
#
import os, stat, tempfile, sys, shutil, base64, bz2
try:
pilotExecDir = '%(pilotExecDir)s'
if not pilotExecDir:
pilotExecDir = os.getcwd()
pilotWorkingDirectory = tempfile.mkdtemp( suffix = 'pilot', prefix = 'DIRAC_', dir = pilotExecDir )
pilotWorkingDirectory = os.path.realpath( pilotWorkingDirectory )
os.chdir( pilotWorkingDirectory )
if %(proxyFlag)s:
open( 'proxy', "w" ).write(bz2.decompress( base64.decodestring( \"\"\"%(compressedAndEncodedProxy)s\"\"\" ) ) )
os.chmod("proxy", stat.S_IRUSR | stat.S_IWUSR)
os.environ["X509_USER_PROXY"]=os.path.join(pilotWorkingDirectory, 'proxy')
open( '%(pilotScript)s', "w" ).write(bz2.decompress( base64.decodestring( \"\"\"%(compressedAndEncodedPilot)s\"\"\" ) ) )
open( '%(installScript)s', "w" ).write(bz2.decompress( base64.decodestring( \"\"\"%(compressedAndEncodedInstall)s\"\"\" ) ) )
os.chmod("%(pilotScript)s", stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR )
os.chmod("%(installScript)s", stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR )
%(extraModuleString)s
if "LD_LIBRARY_PATH" not in os.environ:
os.environ["LD_LIBRARY_PATH"]=""
if "%(httpProxy)s":
os.environ["HTTP_PROXY"]="%(httpProxy)s"
os.environ["X509_CERT_DIR"]=os.path.join(pilotWorkingDirectory, 'etc/grid-security/certificates')
# TODO: structure the output
print '==========================================================='
print 'Environment of execution host'
for key in os.environ.keys():
print key + '=' + os.environ[key]
print '==========================================================='
except Exception, x:
print >> sys.stderr, x
shutil.rmtree( pilotWorkingDirectory )
sys.exit(-1)
cmd = "python %(pilotScript)s %(pilotOptions)s"
print 'Executing: ', cmd
sys.stdout.flush()
os.system( cmd )
shutil.rmtree( pilotWorkingDirectory )
EOF
""" % { 'compressedAndEncodedProxy': compressedAndEncodedProxy,
'compressedAndEncodedPilot': compressedAndEncodedPilot,
'compressedAndEncodedInstall': compressedAndEncodedInstall,
'extraModuleString': extraModuleString,
'httpProxy': httpProxy,
'pilotExecDir': pilotExecDir,
'pilotScript': os.path.basename( self.pilot ),
'installScript': os.path.basename( self.install ),
'pilotOptions': ' '.join( pilotOptions ),
'proxyFlag': proxyFlag }
fd, name = tempfile.mkstemp( suffix = '_pilotwrapper.py', prefix = 'DIRAC_', dir = workingDirectory )
pilotWrapper = os.fdopen( fd, 'w' )
pilotWrapper.write( localPilot )
pilotWrapper.close()
return name
def updatePilotStatus( self ):
""" Update status of pilots in transient states
"""
for queue in self.queueDict:
ce = self.queueDict[queue]['CE']
ceName = self.queueDict[queue]['CEName']
queueName = self.queueDict[queue]['QueueName']
ceType = self.queueDict[queue]['CEType']
siteName = self.queueDict[queue]['Site']
abortedPilots = 0
result = pilotAgentsDB.selectPilots( {'DestinationSite':ceName,
'Queue':queueName,
'GridType':ceType,
'GridSite':siteName,
'Status':TRANSIENT_PILOT_STATUS,
'OwnerDN': self.pilotDN,
'OwnerGroup': self.pilotGroup } )
if not result['OK']:
self.log.error( 'Failed to select pilots: %s' % result['Message'] )
continue
pilotRefs = result['Value']
if not pilotRefs:
continue
result = pilotAgentsDB.getPilotInfo( pilotRefs )
if not result['OK']:
self.log.error( 'Failed to get pilots info from DB', result['Message'] )
continue
pilotDict = result['Value']
stampedPilotRefs = []
for pRef in pilotDict:
if pilotDict[pRef]['PilotStamp']:
stampedPilotRefs.append( pRef + ":::" + pilotDict[pRef]['PilotStamp'] )
else:
stampedPilotRefs = list( pilotRefs )
break
result = ce.isProxyValid()
if not result['OK']:
result = gProxyManager.getPilotProxyFromDIRACGroup( self.pilotDN, self.pilotGroup, 23400 )
if not result['OK']:
return result
self.proxy = result['Value']
ce.setProxy( self.proxy, 23300 )
result = ce.getJobStatus( stampedPilotRefs )
if not result['OK']:
self.log.error( 'Failed to get pilots status from CE', '%s: %s' % ( ceName, result['Message'] ) )
continue
pilotCEDict = result['Value']
for pRef in pilotRefs:
newStatus = ''
oldStatus = pilotDict[pRef]['Status']
ceStatus = pilotCEDict[pRef]
lastUpdateTime = pilotDict[pRef]['LastUpdateTime']
sinceLastUpdate = dateTime() - lastUpdateTime
if oldStatus == ceStatus and ceStatus != "Unknown":
# Normal status did not change, continue
continue
elif ceStatus == "Unknown" and oldStatus == "Unknown":
if sinceLastUpdate < 3600*second:
# Allow 1 hour of Unknown status assuming temporary problems on the CE
continue
else:
newStatus = 'Aborted'
elif ceStatus == "Unknown" and not oldStatus in FINAL_PILOT_STATUS:
# Possible problems on the CE, let's keep the Unknown status for a while
newStatus = 'Unknown'
elif ceStatus != 'Unknown' :
# Update the pilot status to the new value
newStatus = ceStatus
if newStatus:
self.log.info( 'Updating status to %s for pilot %s' % ( newStatus, pRef ) )
result = pilotAgentsDB.setPilotStatus( pRef, newStatus, '', 'Updated by SiteDirector' )
if newStatus == "Aborted":
abortedPilots += 1
# Retrieve the pilot output now
if newStatus in FINAL_PILOT_STATUS:
if pilotDict[pRef]['OutputReady'].lower() == 'false' and self.getOutput:
self.log.info( 'Retrieving output for pilot %s' % pRef )
pilotStamp = pilotDict[pRef]['PilotStamp']
pRefStamp = pRef
if pilotStamp:
pRefStamp = pRef + ':::' + pilotStamp
result = ce.getJobOutput( pRefStamp )
if not result['OK']:
self.log.error( 'Failed to get pilot output', '%s: %s' % ( ceName, result['Message'] ) )
else:
output, error = result['Value']
if output:
result = pilotAgentsDB.storePilotOutput( pRef, output, error )
if not result['OK']:
self.log.error( 'Failed to store pilot output', result['Message'] )
else:
self.log.warn( 'Empty pilot output not stored to PilotDB' )
# If something wrong in the queue, make a pause for the job submission
if abortedPilots:
self.failedQueues[queue] += 1
# The pilot can be in Done state set by the job agent check if the output is retrieved
for queue in self.queueDict:
ce = self.queueDict[queue]['CE']
if not ce.isProxyValid( 120 ):
result = gProxyManager.getPilotProxyFromDIRACGroup( self.pilotDN, self.pilotGroup, 1000 )
if not result['OK']:
return result
ce.setProxy( self.proxy, 940 )
ceName = self.queueDict[queue]['CEName']
queueName = self.queueDict[queue]['QueueName']
ceType = self.queueDict[queue]['CEType']
siteName = self.queueDict[queue]['Site']
result = pilotAgentsDB.selectPilots( {'DestinationSite':ceName,
'Queue':queueName,
'GridType':ceType,
'GridSite':siteName,
'OutputReady':'False',
'Status':FINAL_PILOT_STATUS} )
if not result['OK']:
self.log.error( 'Failed to select pilots', result['Message'] )
continue
pilotRefs = result['Value']
if not pilotRefs:
continue
result = pilotAgentsDB.getPilotInfo( pilotRefs )
if not result['OK']:
self.log.error( 'Failed to get pilots info from DB', result['Message'] )
continue
pilotDict = result['Value']
if self.getOutput:
for pRef in pilotRefs:
self.log.info( 'Retrieving output for pilot %s' % pRef )
pilotStamp = pilotDict[pRef]['PilotStamp']
pRefStamp = pRef
if pilotStamp:
pRefStamp = pRef + ':::' + pilotStamp
result = ce.getJobOutput( pRefStamp )
if not result['OK']:
self.log.error( 'Failed to get pilot output', '%s: %s' % ( ceName, result['Message'] ) )
else:
output, error = result['Value']
result = pilotAgentsDB.storePilotOutput( pRef, output, error )
if not result['OK']:
self.log.error( 'Failed to store pilot output', result['Message'] )
# Check if the accounting is to be sent
if self.sendAccounting:
result = pilotAgentsDB.selectPilots( {'DestinationSite':ceName,
'Queue':queueName,
'GridType':ceType,
'GridSite':siteName,
'AccountingSent':'False',
'Status':FINAL_PILOT_STATUS} )
if not result['OK']:
self.log.error( 'Failed to select pilots', result['Message'] )
continue
pilotRefs = result['Value']
if not pilotRefs:
continue
result = pilotAgentsDB.getPilotInfo( pilotRefs )
if not result['OK']:
self.log.error( 'Failed to get pilots info from DB', result['Message'] )
continue
pilotDict = result['Value']
result = self.sendPilotAccounting( pilotDict )
if not result['OK']:
self.log.error( 'Failed to send pilot agent accounting' )
return S_OK()
def sendPilotAccounting( self, pilotDict ):
""" Send pilot accounting record
"""
for pRef in pilotDict:
self.log.verbose( 'Preparing accounting record for pilot %s' % pRef )
pA = PilotAccounting()
pA.setEndTime( pilotDict[pRef][ 'LastUpdateTime' ] )
pA.setStartTime( pilotDict[pRef][ 'SubmissionTime' ] )
retVal = CS.getUsernameForDN( pilotDict[pRef][ 'OwnerDN' ] )
if not retVal[ 'OK' ]:
userName = 'unknown'
self.log.error( "Can't determine username for dn:", pilotDict[pRef][ 'OwnerDN' ] )
else:
userName = retVal[ 'Value' ]
pA.setValueByKey( 'User', userName )
pA.setValueByKey( 'UserGroup', pilotDict[pRef][ 'OwnerGroup' ] )
result = getSiteForCE( pilotDict[pRef][ 'DestinationSite' ] )
if result['OK'] and result[ 'Value' ].strip():
pA.setValueByKey( 'Site', result['Value'].strip() )
else:
pA.setValueByKey( 'Site', 'Unknown' )
pA.setValueByKey( 'GridCE', pilotDict[pRef][ 'DestinationSite' ] )
pA.setValueByKey( 'GridMiddleware', pilotDict[pRef][ 'GridType' ] )
pA.setValueByKey( 'GridResourceBroker', pilotDict[pRef][ 'Broker' ] )
pA.setValueByKey( 'GridStatus', pilotDict[pRef][ 'Status' ] )
if not 'Jobs' in pilotDict[pRef]:
pA.setValueByKey( 'Jobs', 0 )
else:
pA.setValueByKey( 'Jobs', len( pilotDict[pRef]['Jobs'] ) )
self.log.verbose( "Adding accounting record for pilot %s" % pilotDict[pRef][ 'PilotID' ] )
retVal = gDataStoreClient.addRegister( pA )
if not retVal[ 'OK' ]:
self.log.error( 'Failed to send accounting info for pilot ', pRef )
else:
# Set up AccountingSent flag
result = pilotAgentsDB.setAccountingFlag( pRef )
if not result['OK']:
self.log.error( 'Failed to set accounting flag for pilot ', pRef )
self.log.info( 'Committing accounting records for %d pilots' % len( pilotDict ) )
result = gDataStoreClient.commit()
if result['OK']:
for pRef in pilotDict:
self.log.verbose( 'Setting AccountingSent flag for pilot %s' % pRef )
result = pilotAgentsDB.setAccountingFlag( pRef )
if not result['OK']:
self.log.error( 'Failed to set accounting flag for pilot ', pRef )
else:
return result
return S_OK()
|
vmendez/DIRAC
|
WorkloadManagementSystem/Agent/SiteDirector.py
|
Python
|
gpl-3.0
| 48,832
|
[
"DIRAC"
] |
f00dec9c6547e949bcd4f8130ec9450dc954dac60a118131c47b1ccabc2a09b3
|
'''
GaussianDistr.py
Gaussian probability distribution
Attributes
-------
m : D-dim vector, mean
L : DxD matrix, precision matrix
'''
import numpy as np
import scipy.linalg
from ..util import dotABT, MVgammaln, MVdigamma, gammaln, digamma
from ..util import LOGTWOPI, EPS
from .Distr import Distr
class GaussDistr( Distr ):
######################################################### Constructor
#########################################################
def __init__(self, m=None, L=None):
self.m = np.asarray( m )
self.L = np.asarray( L )
self.D = self.m.size
self.Cache = dict()
######################################################### Log Cond. Prob.
######################################################### E-step
def log_pdf(self, Data):
''' Calculate log soft evidence for all data items under this distrib
Returns
-------
logp : Data.nObs x 1 vector, where
logp[n] = log p( Data[n] | self's mean and prec matrix )
'''
return -1*self.get_log_norm_const() - 0.5*self.dist_mahalanobis(Data.X)
def dist_mahalanobis(self, X):
''' Given NxD matrix X, compute Nx1 vector Dist
Dist[n] = ( X[n]-m )' L (X[n]-m)
'''
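# Assuming dotABT(A, B) computes A.dot(B.T), as the name suggests: since
# cholL() returns the upper Cholesky factor R of L (so L = R^T R), the
# quadratic form (x-m)^T L (x-m) equals ||R (x-m)||^2, which is what the
# column-wise square-and-sum below computes for every row of X.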
Q = dotABT(self.cholL(), X-self.m)
Q *= Q
return np.sum(Q, axis=0)
######################################################### Global updates
######################################################### M-step
''' None required. M-step handled by GaussObsModel.py
'''
######################################################### Basic properties
#########################################################
@classmethod
def calc_log_norm_const( self, logdetL, D):
return 0.5 * D * LOGTWOPI - 0.5 * logdetL
def get_log_norm_const( self ):
''' Returns log( Z ), where
PDF(x) := 1/Z(theta) f( x | theta )
'''
return 0.5 * self.D * LOGTWOPI - 0.5 * self.logdetL()
def get_entropy( self ):
''' Returns entropy of this distribution
H[ p(x) ] = -1*\int p(x|theta) log p(x|theta) dx
Remember, entropy for continuous distributions can be negative
e.g. see Bishop Ch. 1 Eq. 1.110 for Gaussian discussion
'''
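# For a Gaussian with precision matrix L this evaluates to
# 0.5*D*(1 + log(2*pi)) - 0.5*log|L|, i.e. the log-normaliser plus D/2.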
return self.get_log_norm_const() + 0.5*self.D
######################################################### Accessors
#########################################################
def get_natural_params( self ):
eta = self.L, np.dot(self.L,self.m)
return eta
def set_natural_params( self, eta ):
L, Lm = eta
self.L = L
self.m = np.linalg.solve(L, Lm) # invL*L*m = m
self.Cache = dict()
def get_covar(self):
try:
return self.Cache['invL']
except KeyError:
self.Cache['invL'] = np.linalg.inv( self.L )
return self.Cache['invL']
def cholL(self):
try:
return self.Cache['cholL']
except KeyError:
self.Cache['cholL'] = scipy.linalg.cholesky(self.L) #UPPER by default
return self.Cache['cholL']
def logdetL(self):
try:
return self.Cache['logdetL']
except KeyError:
logdetL = 2.0*np.sum( np.log( np.diag( self.cholL() ) ) )
self.Cache['logdetL'] =logdetL
return logdetL
######################################################### I/O Utils
#########################################################
def to_dict(self):
return dict(m=self.m, L=self.L)
def from_dict(self, GDict):
self.m = GDict['m']
self.L = GDict['L']
self.D = self.L.shape[0]
self.Cache = dict()
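# Minimal usage sketch (illustrative only, not part of the original module):
#   import numpy as np
#   d = GaussDistr(m=np.zeros(2), L=np.eye(2))
#   X = np.random.randn(5, 2)
#   d.dist_mahalanobis(X)     # length-5 vector of squared Mahalanobis distances
#   d.get_log_norm_const()    # 0.5*D*log(2*pi), since logdet(L) = 0 here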
|
daeilkim/refinery
|
refinery/bnpy/bnpy-dev/bnpy/distr/GaussDistr.py
|
Python
|
mit
| 3,573
|
[
"Gaussian"
] |
abdff12fa476066783bc64d516f2002aae5e2fd0d85b5f8932750b0f9f3fdeaf
|
import pytest # NOQA
import hashlib
import os
import utilities
import expected
from openwpmtest import OpenWPMTest
from ..automation import TaskManager
from ..automation.utilities.platform_utils import parse_http_stack_trace_str
class TestHTTPInstrument(OpenWPMTest):
NUM_BROWSERS = 1
def get_config(self, data_dir):
manager_params, browser_params = TaskManager.load_default_params(self.NUM_BROWSERS)
manager_params['data_directory'] = data_dir
manager_params['log_directory'] = data_dir
browser_params[0]['headless'] = True
browser_params[0]['http_instrument'] = True
browser_params[0]['save_javascript'] = True
manager_params['db'] = os.path.join(manager_params['data_directory'],
manager_params['database_name'])
return manager_params, browser_params
def test_page_visit(self, tmpdir):
test_url = utilities.BASE_TEST_URL + '/http_test_page.html'
db = self.visit(test_url, str(tmpdir))
# HTTP Requests
rows = utilities.query_db(db, (
"SELECT url, top_level_url, is_XHR, is_frame_load, is_full_page, "
"is_third_party_channel, is_third_party_window, triggering_origin "
"loading_origin, loading_href, content_policy_type FROM http_requests"))
observed_records = set()
for row in rows:
observed_records.add(row)
assert expected.http_requests == observed_records
# HTTP Responses
rows = utilities.query_db(db,
"SELECT url, referrer, location FROM http_responses")
observed_records = set()
for row in rows:
observed_records.add(row)
assert expected.http_responses == observed_records
def test_cache_hits_recorded(self, tmpdir):
"""Verify all http responses are recorded, including cached responses
Note that we expect to see all of the same requests and responses
during the second visit (even if cached) except for images. Cached
images do not trigger Observer Notification events.
See Bug 634073: https://bugzilla.mozilla.org/show_bug.cgi?id=634073
"""
test_url = utilities.BASE_TEST_URL + '/http_test_page.html'
manager_params, browser_params = self.get_config(str(tmpdir))
manager = TaskManager.TaskManager(manager_params, browser_params)
manager.get(test_url, sleep=3)
manager.get(test_url, sleep=3)
manager.close()
db = manager_params['db']
# HTTP Requests
rows = utilities.query_db(db, (
"SELECT url, top_level_url, is_XHR, is_frame_load, is_full_page, "
"is_third_party_channel, is_third_party_window, triggering_origin "
"loading_origin, loading_href, content_policy_type "
"FROM http_requests WHERE visit_id = 2"))
observed_records = set()
for row in rows:
observed_records.add(row)
assert expected.http_cached_requests == observed_records
# HTTP Responses
rows = utilities.query_db(db, (
"SELECT url, referrer, is_cached FROM http_responses "
"WHERE visit_id = 2"))
observed_records = set()
for row in rows:
observed_records.add(row)
assert expected.http_cached_responses == observed_records
def test_http_stacktrace(self, tmpdir):
test_url = utilities.BASE_TEST_URL + '/http_stacktrace.html'
db = self.visit(test_url, str(tmpdir), sleep_after=3)
rows = utilities.query_db(db, (
"SELECT url, req_call_stack FROM http_requests"))
observed_records = set()
for row in rows:
url, stacktrace = row
if (url.endswith("inject_pixel.js") or
url.endswith("test_image.png") or
url.endswith("Blank.gif")):
observed_records.add(stacktrace)
assert observed_records == expected.http_stacktraces
def test_parse_http_stack_trace_str(self, tmpdir):
stacktrace = expected.stack_trace_inject_image
stack_frames = parse_http_stack_trace_str(stacktrace)
assert stack_frames == expected.call_stack_inject_image
def test_http_stacktrace_nonjs_loads(self, tmpdir):
# stacktrace should be empty for requests NOT triggered by scripts
test_url = utilities.BASE_TEST_URL + '/http_test_page.html'
db = self.visit(test_url, str(tmpdir), sleep_after=3)
rows = utilities.query_db(db, (
"SELECT url, req_call_stack FROM http_requests"))
for row in rows:
_, stacktrace = row
assert stacktrace == ""
def test_javascript_saving(self, tmpdir):
""" check that javascript content is saved and hashed correctly """
test_url = utilities.BASE_TEST_URL + '/http_test_page.html'
db = self.visit(test_url, str(tmpdir), sleep_after=3) # NOQA
expected_hashes = {'973e28500d500eab2c27b3bc55c8b621',
'a6475af1ad58b55cf781ca5e1218c7b1'}
for chash, content in utilities.get_javascript_content(str(tmpdir)):
pyhash = hashlib.md5(content).hexdigest()
assert pyhash == chash # Verify expected key (md5 of content)
assert chash in expected_hashes
expected_hashes.remove(chash)
assert len(expected_hashes) == 0 # All expected hashes have been seen
|
tommybananas/OpenWPM
|
test/test_http_instrumentation.py
|
Python
|
gpl-3.0
| 5,457
|
[
"VisIt"
] |
1541afa6e78e3f7aad12e2c23b8dfb0d800bf41d6a35eee2db80367282437c7a
|
# -*- coding: utf-8 -*-
"""
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join('README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='vissim_v8',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='Parse and manipulate VISSIM v8.x models',
long_description=long_description,
# The project's main homepage.
url='http://github.com/brianhuey/vissim',
# Author details
author='S. Brian Huey',
author_email='sbhuey@gmail.com',
# Choose your license
license='BSD License',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate the environment the project is to be used in
'Environment :: Console',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
# What does your project relate to?
keywords='vissim traffic transportation modeling',
# Is your project safe to be zipped?
zip_safe=False,
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['vissim_v8'],
include_package_data=True,
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=[],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['networkx>=1.11', 'numpy', 'scipy', 'lxml'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('default', ['vissim_v8/default/default.inpx', 'vissim_v8/default/default.layx'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
brianhuey/vissim
|
setup_v8.py
|
Python
|
mit
| 4,040
|
[
"Brian"
] |
df05dc6dbbab96c372edb36216a2849540a687b1533e387ab4dd837461352d7e
|
"""
Plot summary statistics as a function of S/N for stars in the training set,
showing us compared to ASPCAP for whatever labels in the model.
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from astropy.io import fits
from astropy.table import Table
from six.moves import cPickle as pickle
import AnniesLasso as tc
# Load the data.
PATH, CATALOG, FILE_FORMAT = ("", "apogee-rg.fits",
"apogee-rg-custom-normalization-{}.memmap")
labelled_set = Table.read(os.path.join(PATH, CATALOG))
dispersion = np.memmap(os.path.join(PATH, FILE_FORMAT).format("dispersion"),
mode="r", dtype=float)
normalized_flux = np.memmap(
os.path.join(PATH, FILE_FORMAT).format("flux"),
mode="r", dtype=float).reshape((len(labelled_set), -1))
normalized_ivar = np.memmap(
os.path.join(PATH, FILE_FORMAT).format("ivar"),
mode="r", dtype=float).reshape(normalized_flux.shape)
# Split up the set.
np.random.seed(123)
q = np.random.randint(0, 10, len(labelled_set)) % 10
validate_set = (q == 0)
train_set = (q > 0)
"""
# Fit the validation stuff first (which is high S/N).
model = tc.L1RegularizedCannonModel(labelled_set[train_set],
normalized_flux[train_set], normalized_ivar[train_set])
model.regularization = 0
model._metadata["q"] = q
model.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(labelled_set,
tc.vectorizer.polynomial.terminator(["TEFF", "LOGG", "FE_H"], 2),
scale_factor=0.5)
model_filename = "mudbox-3label.model"
if os.path.exists(model_filename):
model.load(model_filename)
else:
model.train(fixed_scatter=True)
model.save(model_filename)
inferred_labels = model.fit(normalized_flux[validate_set], normalized_ivar[validate_set])
inferred_labels = np.vstack(inferred_labels).T
fig, ax = plt.subplots(3)
for i, label_name in enumerate(model.vectorizer.label_names):
ax[i].scatter(labelled_set[label_name][validate_set], inferred_labels[:, i])
raise a
"""
# Fit individual spectra using two different models.
with open("apogee-rg-individual-visit-normalized.pickle", "rb") as fp:
individual_visit_spectra = pickle.load(fp, encoding="latin-1")
latex_labels = {
"TEFF": "T_{\\rm eff}",
"LOGG": "\log{g}",
"FE_H": "{\\rm [Fe/H]}"
}
models_to_compare = {
#"model1": "gridsearch-2.0-3.0.model",
"model2": "gridsearch-2.0-3.0-s2-heuristically-set.model"
}
for model_name, saved_filename in models_to_compare.items():
scale_factor = saved_filename
model = tc.L1RegularizedCannonModel(None, None, None)
with open(saved_filename, "rb") as fp:
model._theta, model._s2, model._regularization, model._metadata = pickle.load(fp)
model.pool = None
model.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(
labelled_set,
tc.vectorizer.polynomial.terminator(["TEFF", "LOGG", "FE_H"], 2),
scale_factor=0.5)
# Fit the high S/N validation data.
#high_snr_comparison = model.fit(
# normalized_flux[validate_set], normalized_ivar[validate_set])
# Fit the individual visit spectra.
individual_visit_results = {
"SNR": []
}
individual_visit_results.update(
{label: [] for label in model.vectorizer.label_names})
individual_visit_actual_results = {label: [] for label in model.vectorizer.label_names}
apogee_ids = []
N_validate_set_stars = sum(validate_set)
for i, apogee_id in enumerate(labelled_set["APOGEE_ID"][validate_set]):
inv_visit_flux, inv_visit_ivar, metadata = individual_visit_spectra[apogee_id]
# Infer the labels from the individual visits.
inferred_labels = model.fit(inv_visit_flux, inv_visit_ivar)
# Use the labelled set as the reference scale.
match = labelled_set["APOGEE_ID"] == apogee_id
for j, label_name in enumerate(model.vectorizer.label_names):
individual_visit_results[label_name].extend(
inferred_labels[:, j] - labelled_set[label_name][match][0])
individual_visit_actual_results[label_name].extend(
inferred_labels[:, j]
)
individual_visit_results["SNR"].extend(metadata["SNR"])
apogee_ids.extend([apogee_id] * len(metadata["SNR"]))
print(i, apogee_id)
# Now plot the differences.
N_axes = len(individual_visit_results) - 1
fig, axes = plt.subplots(2, 2)
axes = axes.flatten()
K = 200
for k, (ax, label_name) in enumerate(zip(axes, model.vectorizer.label_names)):
x = np.log10(individual_visit_results["SNR"])
y = np.log10(np.abs(individual_visit_results[label_name]))
ind = np.argsort(x)
ax.plot(x, y, "k.", alpha=0.25)
for i in np.arange(0, len(ind)-K, K/10):
ax.plot(np.median(x[ind][i:i+K]), np.median(y[ind][i:i+K]), "ro")
ax.set_xlabel("log10SNR")
ax.set_ylabel(label_name)
ax.set_xlim(1, 2.5)
xlim = np.array(ax.get_xlim())
ax.plot(xlim, -xlim + np.median(x+y), 'r-')
ax.plot(xlim, -xlim + np.mean(x+y), 'b-')
ax.plot(xlim, 0*xlim + np.median(y), 'r')
ax.plot(xlim, 0*xlim + np.mean(y), 'b')
ax.set_ylim((-xlim + np.median(x+y))[::-1])
# NOTE: 'rind' below is not defined anywhere in this script; we assume it is
# meant to be a random permutation used only to shuffle the plotting order.
rind = np.random.permutation(len(individual_visit_actual_results["TEFF"]))
fig, ax = plt.subplots()
ax.scatter(
np.array(individual_visit_actual_results["TEFF"])[rind],
np.array(individual_visit_actual_results["LOGG"])[rind],
c=np.array(individual_visit_actual_results["FE_H"])[rind]
)
# astronomers are crazy
ax.set_xlim(ax.get_xlim()[::-1])
ax.set_ylim(ax.get_ylim()[::-1])
|
peraktong/AnniesLasso
|
sandbox-scripts/mudbox_compare_models.py
|
Python
|
mit
| 5,549
|
[
"VisIt"
] |
cc68510067dcb86fc3dfd547b7d5b4c990a56ddaa2dec321fb3fe3a925917d4c
|
"""
Galaxy control queue and worker. This is used to handle 'app' control like
reloading the toolbox, etc., across multiple processes.
"""
import logging
import threading
import sys
import galaxy.queues
from galaxy import eggs, util
eggs.require('anyjson')
if sys.version_info < (2, 7, 0):
# Kombu requires importlib and ordereddict to function under Python 2.6.
eggs.require('importlib')
eggs.require('ordereddict')
eggs.require('kombu')
from kombu import Connection
from kombu.mixins import ConsumerMixin
from kombu.pools import producers
log = logging.getLogger(__name__)
class GalaxyQueueWorker(ConsumerMixin, threading.Thread):
"""
This is a flexible worker for galaxy's queues. Each process, web or
handler, will have one of these used for dispatching so called 'control'
tasks.
"""
def __init__(self, app, queue, task_mapping):
super(GalaxyQueueWorker, self).__init__()
log.info("Initalizing Galaxy Queue Worker on %s" % app.config.amqp_internal_connection)
self.connection = Connection(app.config.amqp_internal_connection)
self.app = app
# Eventually we may want different workers w/ their own queues and task
# mappings. Right now, there's only the one.
self.control_queue = queue
self.task_mapping = task_mapping
self.declare_queues = galaxy.queues.all_control_queues_for_declare(app.config)
# TODO we may want to purge the queue at the start to avoid executing
# stale 'reload_tool', etc messages. This can happen if, say, a web
# process goes down and messages get sent before it comes back up.
# Those messages will no longer be useful (in any current case)
def get_consumers(self, Consumer, channel):
return [Consumer(queues=self.control_queue,
callbacks=[self.process_task])]
def process_task(self, body, message):
if body['task'] in self.task_mapping:
if body.get('noop', None) != self.app.config.server_name:
try:
f = self.task_mapping[body['task']]
log.info("Instance recieved '%s' task, executing now." % body['task'])
f(self.app, **body['kwargs'])
except Exception:
# this shouldn't ever throw an exception, but...
log.exception("Error running control task type: %s" % body['task'])
else:
log.warning("Recieved a malformed task message:\n%s" % body)
message.ack()
def shutdown(self):
self.should_stop = True
def send_control_task(trans, task, noop_self=False, kwargs={}):
log.info("Sending %s control task." % task)
payload = {'task': task,
'kwargs': kwargs}
if noop_self:
payload['noop'] = trans.app.config.server_name
try:
c = Connection(trans.app.config.amqp_internal_connection)
with producers[c].acquire(block=True) as producer:
producer.publish(payload, exchange=galaxy.queues.galaxy_exchange,
declare=[galaxy.queues.galaxy_exchange] + galaxy.queues.all_control_queues_for_declare(trans.app.config),
routing_key='control')
except Exception:
# This is likely connection refused.
# TODO Use the specific Exception above.
log.exception("Error sending control task: %s." % payload)
# Tasks -- to be reorganized into a separate module as appropriate. This is
# just an example method. Ideally this gets pushed into atomic tasks, whether
# where they're currently invoked, or elsewhere. (potentially using a dispatch
# decorator).
def reload_tool(app, **kwargs):
params = util.Params(kwargs)
tool_id = params.get('tool_id', None)
log.debug("Executing reload tool task for %s" % tool_id)
if tool_id:
app.toolbox.reload_tool_by_id( tool_id )
else:
log.error("Reload tool invoked without tool id.")
def reload_tool_data_tables(app, **kwargs):
params = util.Params(kwargs)
log.debug("Executing tool data table reload for %s" % params.get('table_names', 'all tables'))
table_names = app.tool_data_tables.reload_tables( table_names=params.get('table_name', None))
log.debug("Finished data table reload for %s" % table_names)
def admin_job_lock(app, **kwargs):
job_lock = kwargs.get('job_lock', False)
# job_queue is exposed in the root app, but this will be 'fixed' at some
# point, so we're using the reference from the handler.
app.job_manager.job_handler.job_queue.job_lock = job_lock
log.info("Administrative Job Lock is now set to %s. Jobs will %s dispatch."
% (job_lock, "not" if job_lock else "now"))
control_message_to_task = { 'reload_tool': reload_tool,
'reload_tool_data_tables': reload_tool_data_tables,
'admin_job_lock': admin_job_lock}
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/queue_worker.py
|
Python
|
gpl-3.0
| 4,957
|
[
"Galaxy"
] |
144e0b9830e8d5a3c1841d453fd70a64c00551915278e644d6c4ef27d2f94590
|
"""
@created_at 2014-07-17
@author Exequiel Fuentes <efulet@gmail.com>
@author Brian Keith <briankeithn@gmail.com>
"""
# It is recommended to follow these standards:
# 1. For coding style: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/)
# 2. For documentation: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/)
class GraphException(Exception):
"""GraphException maneja las excepciones para la clase Graph.
Como usar esta clase:
raise GraphException("Arreglo fuera de los limites")
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
efulet/pca
|
pca/lib/graph_exception.py
|
Python
|
mit
| 694
|
[
"Brian"
] |
13fcf916775ed13953fd60ab3c11186c85da82bda75fec816dbc5ad8105b383b
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.external_vpn_gateways import (
ExternalVpnGatewaysClient,
)
from google.cloud.compute_v1.services.external_vpn_gateways import pagers
from google.cloud.compute_v1.services.external_vpn_gateways import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
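# e.g. (hypothetical values) if the client's DEFAULT_ENDPOINT contains
# "localhost" the tests are pointed at "foo.googleapis.com"; any other
# endpoint, such as "compute.googleapis.com", is returned unchanged.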
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert ExternalVpnGatewaysClient._get_default_mtls_endpoint(None) is None
assert (
ExternalVpnGatewaysClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
ExternalVpnGatewaysClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
ExternalVpnGatewaysClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
ExternalVpnGatewaysClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
ExternalVpnGatewaysClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class,transport_name", [(ExternalVpnGatewaysClient, "rest"),]
)
def test_external_vpn_gateways_client_from_service_account_info(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.ExternalVpnGatewaysRestTransport, "rest"),],
)
def test_external_vpn_gateways_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class,transport_name", [(ExternalVpnGatewaysClient, "rest"),]
)
def test_external_vpn_gateways_client_from_service_account_file(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_external_vpn_gateways_client_get_transport_class():
transport = ExternalVpnGatewaysClient.get_transport_class()
available_transports = [
transports.ExternalVpnGatewaysRestTransport,
]
assert transport in available_transports
transport = ExternalVpnGatewaysClient.get_transport_class("rest")
assert transport == transports.ExternalVpnGatewaysRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport, "rest"),],
)
@mock.patch.object(
ExternalVpnGatewaysClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ExternalVpnGatewaysClient),
)
def test_external_vpn_gateways_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(ExternalVpnGatewaysClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(ExternalVpnGatewaysClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
ExternalVpnGatewaysClient,
transports.ExternalVpnGatewaysRestTransport,
"rest",
"true",
),
(
ExternalVpnGatewaysClient,
transports.ExternalVpnGatewaysRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
ExternalVpnGatewaysClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ExternalVpnGatewaysClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_external_vpn_gateways_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [ExternalVpnGatewaysClient])
@mock.patch.object(
ExternalVpnGatewaysClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(ExternalVpnGatewaysClient),
)
def test_external_vpn_gateways_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport, "rest"),],
)
def test_external_vpn_gateways_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
ExternalVpnGatewaysClient,
transports.ExternalVpnGatewaysRestTransport,
"rest",
None,
),
],
)
def test_external_vpn_gateways_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [compute.DeleteExternalVpnGatewayRequest, dict,]
)
def test_delete_unary_rest(request_type):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "external_vpn_gateway": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
request_type=compute.DeleteExternalVpnGatewayRequest,
):
transport_class = transports.ExternalVpnGatewaysRestTransport
request_init = {}
request_init["external_vpn_gateway"] = ""
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["externalVpnGateway"] = "external_vpn_gateway_value"
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "externalVpnGateway" in jsonified_request
assert jsonified_request["externalVpnGateway"] == "external_vpn_gateway_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields would cause the real transcode() to fail when the
        # http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "delete",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
transport = transports.ExternalVpnGatewaysRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.delete._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("externalVpnGateway", "project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.ExternalVpnGatewaysRestInterceptor(),
)
client = ExternalVpnGatewaysClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "post_delete"
) as post, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "pre_delete"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.DeleteExternalVpnGatewayRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.delete_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_delete_unary_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteExternalVpnGatewayRequest
):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "external_vpn_gateway": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_unary(request)
def test_delete_unary_rest_flattened():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "external_vpn_gateway": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", external_vpn_gateway="external_vpn_gateway_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.delete_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/externalVpnGateways/{external_vpn_gateway}"
% client.transport._host,
args[1],
)
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_unary(
compute.DeleteExternalVpnGatewayRequest(),
project="project_value",
external_vpn_gateway="external_vpn_gateway_value",
)
def test_delete_unary_rest_error():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.GetExternalVpnGatewayRequest, dict,])
def test_get_rest(request_type):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "external_vpn_gateway": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ExternalVpnGateway(
creation_timestamp="creation_timestamp_value",
description="description_value",
id=205,
kind="kind_value",
label_fingerprint="label_fingerprint_value",
name="name_value",
redundancy_type="redundancy_type_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.ExternalVpnGateway.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.ExternalVpnGateway)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.id == 205
assert response.kind == "kind_value"
assert response.label_fingerprint == "label_fingerprint_value"
assert response.name == "name_value"
assert response.redundancy_type == "redundancy_type_value"
assert response.self_link == "self_link_value"
def test_get_rest_required_fields(request_type=compute.GetExternalVpnGatewayRequest):
transport_class = transports.ExternalVpnGatewaysRestTransport
request_init = {}
request_init["external_vpn_gateway"] = ""
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["externalVpnGateway"] = "external_vpn_gateway_value"
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "externalVpnGateway" in jsonified_request
assert jsonified_request["externalVpnGateway"] == "external_vpn_gateway_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.ExternalVpnGateway()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields would cause the real transcode() to fail when the
        # http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.ExternalVpnGateway.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_rest_unset_required_fields():
transport = transports.ExternalVpnGatewaysRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("externalVpnGateway", "project",)))
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.ExternalVpnGatewaysRestInterceptor(),
)
client = ExternalVpnGatewaysClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "post_get"
) as post, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "pre_get"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.ExternalVpnGateway.to_json(
compute.ExternalVpnGateway()
)
request = compute.GetExternalVpnGatewayRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.ExternalVpnGateway
client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetExternalVpnGatewayRequest
):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "external_vpn_gateway": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_flattened():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ExternalVpnGateway()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "external_vpn_gateway": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", external_vpn_gateway="external_vpn_gateway_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.ExternalVpnGateway.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/externalVpnGateways/{external_vpn_gateway}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetExternalVpnGatewayRequest(),
project="project_value",
external_vpn_gateway="external_vpn_gateway_value",
)
def test_get_rest_error():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.InsertExternalVpnGatewayRequest, dict,]
)
def test_insert_unary_rest(request_type):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["external_vpn_gateway_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"id": 205,
"interfaces": [{"id": 205, "ip_address": "ip_address_value"}],
"kind": "kind_value",
"label_fingerprint": "label_fingerprint_value",
"labels": {},
"name": "name_value",
"redundancy_type": "redundancy_type_value",
"self_link": "self_link_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
request_type=compute.InsertExternalVpnGatewayRequest,
):
transport_class = transports.ExternalVpnGatewaysRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields would cause the real transcode() to fail when the
        # http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
transport = transports.ExternalVpnGatewaysRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("externalVpnGatewayResource", "project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.ExternalVpnGatewaysRestInterceptor(),
)
client = ExternalVpnGatewaysClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "post_insert"
) as post, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "pre_insert"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.InsertExternalVpnGatewayRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.insert_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.InsertExternalVpnGatewayRequest
):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["external_vpn_gateway_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"id": 205,
"interfaces": [{"id": 205, "ip_address": "ip_address_value"}],
"kind": "kind_value",
"label_fingerprint": "label_fingerprint_value",
"labels": {},
"name": "name_value",
"redundancy_type": "redundancy_type_value",
"self_link": "self_link_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert_unary(request)
def test_insert_unary_rest_flattened():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
external_vpn_gateway_resource=compute.ExternalVpnGateway(
creation_timestamp="creation_timestamp_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/externalVpnGateways"
% client.transport._host,
args[1],
)
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert_unary(
compute.InsertExternalVpnGatewayRequest(),
project="project_value",
external_vpn_gateway_resource=compute.ExternalVpnGateway(
creation_timestamp="creation_timestamp_value"
),
)
def test_insert_unary_rest_error():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.ListExternalVpnGatewaysRequest, dict,]
)
def test_list_rest(request_type):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ExternalVpnGatewayList(
etag="etag_value",
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.ExternalVpnGatewayList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.etag == "etag_value"
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_required_fields(request_type=compute.ListExternalVpnGatewaysRequest):
transport_class = transports.ExternalVpnGatewaysRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(
("filter", "max_results", "order_by", "page_token", "return_partial_success",)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.ExternalVpnGatewayList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields would cause the real transcode() to fail when the
        # http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.ExternalVpnGatewayList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_list_rest_unset_required_fields():
transport = transports.ExternalVpnGatewaysRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.ExternalVpnGatewaysRestInterceptor(),
)
client = ExternalVpnGatewaysClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "post_list"
) as post, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "pre_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.ExternalVpnGatewayList.to_json(
compute.ExternalVpnGatewayList()
)
request = compute.ListExternalVpnGatewaysRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.ExternalVpnGatewayList
client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListExternalVpnGatewaysRequest
):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_flattened():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.ExternalVpnGatewayList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.ExternalVpnGatewayList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/externalVpnGateways"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListExternalVpnGatewaysRequest(), project="project_value",
)
def test_list_rest_pager(transport: str = "rest"):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.ExternalVpnGatewayList(
items=[
compute.ExternalVpnGateway(),
compute.ExternalVpnGateway(),
compute.ExternalVpnGateway(),
],
next_page_token="abc",
),
compute.ExternalVpnGatewayList(items=[], next_page_token="def",),
compute.ExternalVpnGatewayList(
items=[compute.ExternalVpnGateway(),], next_page_token="ghi",
),
compute.ExternalVpnGatewayList(
items=[compute.ExternalVpnGateway(), compute.ExternalVpnGateway(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.ExternalVpnGatewayList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.ExternalVpnGateway) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [compute.SetLabelsExternalVpnGatewayRequest, dict,]
)
def test_set_labels_unary_rest(request_type):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["global_set_labels_request_resource"] = {
"label_fingerprint": "label_fingerprint_value",
"labels": {},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_labels_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_set_labels_unary_rest_required_fields(
request_type=compute.SetLabelsExternalVpnGatewayRequest,
):
transport_class = transports.ExternalVpnGatewaysRestTransport
request_init = {}
request_init["project"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_labels._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_labels._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields would cause the real transcode() to fail when the
        # http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_labels_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_set_labels_unary_rest_unset_required_fields():
transport = transports.ExternalVpnGatewaysRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.set_labels._get_unset_required_fields({})
assert set(unset_fields) == (
set(()) & set(("globalSetLabelsRequestResource", "project", "resource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_set_labels_unary_rest_interceptors(null_interceptor):
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.ExternalVpnGatewaysRestInterceptor(),
)
client = ExternalVpnGatewaysClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "post_set_labels"
) as post, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "pre_set_labels"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.SetLabelsExternalVpnGatewayRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.set_labels_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_set_labels_unary_rest_bad_request(
transport: str = "rest", request_type=compute.SetLabelsExternalVpnGatewayRequest
):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["global_set_labels_request_resource"] = {
"label_fingerprint": "label_fingerprint_value",
"labels": {},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.set_labels_unary(request)
def test_set_labels_unary_rest_flattened():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "resource": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
resource="resource_value",
global_set_labels_request_resource=compute.GlobalSetLabelsRequest(
label_fingerprint="label_fingerprint_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.set_labels_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/externalVpnGateways/{resource}/setLabels"
% client.transport._host,
args[1],
)
def test_set_labels_unary_rest_flattened_error(transport: str = "rest"):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_labels_unary(
compute.SetLabelsExternalVpnGatewayRequest(),
project="project_value",
resource="resource_value",
global_set_labels_request_resource=compute.GlobalSetLabelsRequest(
label_fingerprint="label_fingerprint_value"
),
)
def test_set_labels_unary_rest_error():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.TestIamPermissionsExternalVpnGatewayRequest, dict,]
)
def test_test_iam_permissions_rest(request_type):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["test_permissions_request_resource"] = {
"permissions": ["permissions_value_1", "permissions_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse(
permissions=["permissions_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.TestPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_rest_required_fields(
request_type=compute.TestIamPermissionsExternalVpnGatewayRequest,
):
transport_class = transports.ExternalVpnGatewaysRestTransport
request_init = {}
request_init["project"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).test_iam_permissions._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).test_iam_permissions._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields would cause the real transcode() to fail when the
        # http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_test_iam_permissions_rest_unset_required_fields():
transport = transports.ExternalVpnGatewaysRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.test_iam_permissions._get_unset_required_fields({})
assert set(unset_fields) == (
set(()) & set(("project", "resource", "testPermissionsRequestResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_test_iam_permissions_rest_interceptors(null_interceptor):
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.ExternalVpnGatewaysRestInterceptor(),
)
client = ExternalVpnGatewaysClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "post_test_iam_permissions"
) as post, mock.patch.object(
transports.ExternalVpnGatewaysRestInterceptor, "pre_test_iam_permissions"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.TestPermissionsResponse.to_json(
compute.TestPermissionsResponse()
)
request = compute.TestIamPermissionsExternalVpnGatewayRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.TestPermissionsResponse
client.test_iam_permissions(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_test_iam_permissions_rest_bad_request(
transport: str = "rest",
request_type=compute.TestIamPermissionsExternalVpnGatewayRequest,
):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["test_permissions_request_resource"] = {
"permissions": ["permissions_value_1", "permissions_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.test_iam_permissions(request)
def test_test_iam_permissions_rest_flattened():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "resource": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.test_iam_permissions(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions"
% client.transport._host,
args[1],
)
def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.test_iam_permissions(
compute.TestIamPermissionsExternalVpnGatewayRequest(),
project="project_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
def test_test_iam_permissions_rest_error():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ExternalVpnGatewaysClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ExternalVpnGatewaysClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ExternalVpnGatewaysClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ExternalVpnGatewaysClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ExternalVpnGatewaysRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ExternalVpnGatewaysClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize(
"transport_class", [transports.ExternalVpnGatewaysRestTransport,]
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_external_vpn_gateways_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ExternalVpnGatewaysTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_external_vpn_gateways_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.external_vpn_gateways.transports.ExternalVpnGatewaysTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ExternalVpnGatewaysTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"insert",
"list",
"set_labels",
"test_iam_permissions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_external_vpn_gateways_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.external_vpn_gateways.transports.ExternalVpnGatewaysTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ExternalVpnGatewaysTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_external_vpn_gateways_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.external_vpn_gateways.transports.ExternalVpnGatewaysTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ExternalVpnGatewaysTransport()
adc.assert_called_once()
def test_external_vpn_gateways_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ExternalVpnGatewaysClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_external_vpn_gateways_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.ExternalVpnGatewaysRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_external_vpn_gateways_host_no_port(transport_name):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_external_vpn_gateways_host_with_port(transport_name):
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ExternalVpnGatewaysClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = ExternalVpnGatewaysClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ExternalVpnGatewaysClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = ExternalVpnGatewaysClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = ExternalVpnGatewaysClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ExternalVpnGatewaysClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = ExternalVpnGatewaysClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = ExternalVpnGatewaysClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ExternalVpnGatewaysClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = ExternalVpnGatewaysClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = ExternalVpnGatewaysClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ExternalVpnGatewaysClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ExternalVpnGatewaysClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = ExternalVpnGatewaysClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ExternalVpnGatewaysClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ExternalVpnGatewaysTransport, "_prep_wrapped_messages"
) as prep:
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ExternalVpnGatewaysTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ExternalVpnGatewaysClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = ExternalVpnGatewaysClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-compute | tests/unit/gapic/compute_v1/test_external_vpn_gateways.py | Python | apache-2.0 | 101,376 | ["Octopus"] | e6288a53a6063f8b65b56545d4e0a53e4abb49ff1fede4adc3e2937ef5d18f07 |
from __future__ import unicode_literals
import json
import os
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core.files.base import ContentFile
from django.forms.utils import ErrorDict
from django.test import TestCase, modify_settings
from django.test.utils import override_settings
from django.urls import NoReverseMatch, reverse
from wagtail.core.models import Collection, GroupCollectionPermission, Page
from wagtail.tests.utils import WagtailTestUtils
from six import b
from tests.testapp.models import EventPage, EventPageRelatedMedia
from wagtailmedia import models
class TestMediaIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def test_simple(self):
response = self.client.get(reverse("wagtailmedia:index"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/index.html")
self.assertContains(response, "Add audio")
self.assertContains(response, "Add video")
@modify_settings(INSTALLED_APPS={"prepend": "tests.testextends"})
def test_extends(self):
response = self.client.get(reverse("wagtailmedia:index"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/index.html")
self.assertNotContains(response, "Add audio")
self.assertNotContains(response, "Add video")
self.assertContains(response, "You shan't act")
def test_search(self):
response = self.client.get(reverse("wagtailmedia:index"), {"q": "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["query_string"], "Hello")
@staticmethod
def make_media():
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
for i in range(50):
media = models.Media(
title="Test " + str(i), duration=100 + i, file=fake_file, type="audio"
)
media.save()
def test_pagination(self):
self.make_media()
response = self.client.get(reverse("wagtailmedia:index"), {"p": 2})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/index.html")
# Check that we got the correct page
self.assertEqual(response.context["media_files"].number, 2)
def test_pagination_invalid(self):
self.make_media()
response = self.client.get(reverse("wagtailmedia:index"), {"p": "Hello World!"})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/index.html")
# Check that we got page one
self.assertEqual(response.context["media_files"].number, 1)
def test_pagination_out_of_range(self):
self.make_media()
response = self.client.get(reverse("wagtailmedia:index"), {"p": 99999})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/index.html")
# Check that we got the last page
self.assertEqual(
response.context["media_files"].number,
response.context["media_files"].paginator.num_pages,
)
def test_ordering(self):
orderings = ["title", "-created_at"]
for ordering in orderings:
response = self.client.get(
reverse("wagtailmedia:index"), {"ordering": ordering}
)
self.assertEqual(response.status_code, 200)
class TestMediaAddView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def test_action_block(self):
with self.settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(os.path.dirname(__file__), "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.request",
"wagtail.contrib.settings.context_processors.settings",
],
"debug": True,
},
}
]
):
response = self.client.get(reverse("wagtailmedia:add", args=("audio",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
self.assertContains(
response,
'<form action="/somewhere/else" method="POST" enctype="multipart/form-data" novalidate>',
)
def test_get_audio(self):
response = self.client.get(reverse("wagtailmedia:add", args=("audio",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
# as standard, only the root collection exists and so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
self.assertContains(response, "Add audio")
self.assertNotContains(response, "Add audio or video")
self.assertContains(
response,
'<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
reverse("wagtailmedia:add", args=("audio",))
),
count=1,
)
def test_get_video(self):
response = self.client.get(reverse("wagtailmedia:add", args=("video",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
self.assertContains(response, "Add video")
self.assertNotContains(response, "Add audio or video")
self.assertContains(
response,
'<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
reverse("wagtailmedia:add", args=("video",))
),
count=1,
)
# as standard, only the root collection exists and so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
# draftail should NOT be a standard JS include on this page
self.assertNotContains(response, "wagtailadmin/js/draftail.js")
def test_get_audio_or_video(self):
response = self.client.get(reverse("wagtailmedia:add", args=("media",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
self.assertNotContains(response, "Add video")
self.assertContains(response, "Add audio or video")
def test_get_audio_with_collections(self):
root_collection = Collection.get_first_root_node()
root_collection.add_child(name="Evil plans")
response = self.client.get(reverse("wagtailmedia:add", args=("audio",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
self.assertContains(response, '<label for="id_collection">')
self.assertContains(response, "Evil plans")
self.assertContains(response, "Add audio")
self.assertContains(
response,
'<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
reverse("wagtailmedia:add", args=("audio",))
),
count=1,
)
def test_get_video_with_collections(self):
root_collection = Collection.get_first_root_node()
root_collection.add_child(name="Evil plans")
response = self.client.get(reverse("wagtailmedia:add", args=("video",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
self.assertContains(response, '<label for="id_collection">')
self.assertContains(response, "Evil plans")
self.assertContains(response, "Add video")
self.assertContains(
response,
'<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
reverse("wagtailmedia:add", args=("video",))
),
count=1,
)
def test_post_audio(self):
# Build a fake file
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
# Submit
post_data = {"title": "Test media", "file": fake_file, "duration": 100}
response = self.client.post(
reverse("wagtailmedia:add", args=("audio",)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media should be created, and be placed in the root collection
self.assertTrue(models.Media.objects.filter(title="Test media").exists())
root_collection = Collection.get_first_root_node()
media = models.Media.objects.get(title="Test media")
self.assertEqual(media.collection, root_collection)
self.assertEqual(media.type, "audio")
def test_post_video(self):
# Build a fake file
fake_file = ContentFile(b("A boring example movie"), name="movie.mp4")
# Submit
post_data = {
"title": "Test media",
"file": fake_file,
"duration": 100,
"width": 720,
"height": 480,
}
response = self.client.post(
reverse("wagtailmedia:add", args=("video",)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media should be created, and be placed in the root collection
self.assertTrue(models.Media.objects.filter(title="Test media").exists())
root_collection = Collection.get_first_root_node()
media = models.Media.objects.get(title="Test media")
self.assertEqual(media.collection, root_collection)
self.assertEqual(media.type, "video")
def test_post_audio_with_collections(self):
root_collection = Collection.get_first_root_node()
evil_plans_collection = root_collection.add_child(name="Evil plans")
# Build a fake file
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
# Submit
post_data = {
"title": "Test media",
"file": fake_file,
"duration": 100,
"collection": evil_plans_collection.id,
}
response = self.client.post(
reverse("wagtailmedia:add", args=("audio",)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media should be created, and be placed in the Evil Plans collection
self.assertTrue(models.Media.objects.filter(title="Test media").exists())
media = models.Media.objects.get(title="Test media")
self.assertEqual(media.collection, evil_plans_collection)
self.assertEqual(media.type, "audio")
def test_post_video_with_collections(self):
root_collection = Collection.get_first_root_node()
evil_plans_collection = root_collection.add_child(name="Evil plans")
# Submit
post_data = {
"title": "Test media",
"file": ContentFile(b("A boring example movie"), name="movie.mp4"),
"duration": 100,
"collection": evil_plans_collection.id,
}
response = self.client.post(
reverse("wagtailmedia:add", args=("video",)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media should be created, and be placed in the Evil Plans collection
self.assertTrue(models.Media.objects.filter(title="Test media").exists())
media = models.Media.objects.get(title="Test media")
self.assertEqual(media.collection, evil_plans_collection)
self.assertEqual(media.type, "video")
@override_settings(WAGTAILMEDIA={"MEDIA_MODEL": "wagtailmedia_tests.CustomMedia"})
def test_get_with_custom_model(self):
# both audio and video use the same template
response = self.client.get(reverse("wagtailmedia:add", args=("video",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
# Ensure the form supports file uploads
self.assertContains(response, 'enctype="multipart/form-data"')
# form media should be imported
self.assertContains(response, "wagtailadmin/js/draftail.js")
class TestMediaAddViewWithLimitedCollectionPermissions(TestCase, WagtailTestUtils):
def setUp(self):
add_media_permission = Permission.objects.get(
content_type__app_label="wagtailmedia", codename="add_media"
)
admin_permission = Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
root_collection = Collection.get_first_root_node()
self.evil_plans_collection = root_collection.add_child(name="Evil plans")
conspirators_group = Group.objects.create(name="Evil conspirators")
conspirators_group.permissions.add(admin_permission)
GroupCollectionPermission.objects.create(
group=conspirators_group,
collection=self.evil_plans_collection,
permission=add_media_permission,
)
user = get_user_model().objects.create_user(
username="moriarty", email="moriarty@example.com", password="password"
)
user.groups.add(conspirators_group)
self.client.login(username="moriarty", password="password")
def test_get_audio(self):
response = self.client.get(reverse("wagtailmedia:add", args=("audio",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
# user only has access to one collection, so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
self.assertContains(response, "Add audio")
self.assertContains(
response,
'<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
reverse("wagtailmedia:add", args=("audio",))
),
count=1,
)
def test_get_video(self):
response = self.client.get(reverse("wagtailmedia:add", args=("video",)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/add.html")
# user only has access to one collection, so no 'Collection' option
# is displayed on the form
self.assertNotContains(response, '<label for="id_collection">')
self.assertContains(response, "Add video")
self.assertContains(
response,
'<form action="{0}" method="POST" enctype="multipart/form-data" novalidate>'.format(
reverse("wagtailmedia:add", args=("video",))
),
count=1,
)
def test_post_audio(self):
# Build a fake file
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
# Submit
post_data = {"title": "Test media", "file": fake_file, "duration": 100}
response = self.client.post(
reverse("wagtailmedia:add", args=("audio",)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media should be created with type 'audio' and in the 'evil plans' collection,
# despite there being no collection field in the form, because that's the
# only one the user has access to
self.assertTrue(models.Media.objects.filter(title="Test media").exists())
media = models.Media.objects.get(title="Test media")
self.assertEqual(media.collection, self.evil_plans_collection)
self.assertEqual(media.type, "audio")
def test_post_video(self):
# Build a fake file
fake_file = ContentFile(b("A boring example movie"), name="movie.mp4")
# Submit
post_data = {"title": "Test media", "file": fake_file, "duration": 100}
response = self.client.post(
reverse("wagtailmedia:add", args=("video",)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media should be created with type 'video' and in the 'evil plans' collection,
# despite there being no collection field in the form, because that's the
# only one the user has access to
self.assertTrue(models.Media.objects.filter(title="Test media").exists())
media = models.Media.objects.get(title="Test media")
self.assertEqual(media.collection, self.evil_plans_collection)
self.assertEqual(media.type, "video")
class TestMediaEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Build a fake file
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
# Create a media to edit
self.media = models.get_media_model().objects.create(
title="Test media", file=fake_file, duration=100
)
def test_simple(self):
response = self.client.get(reverse("wagtailmedia:edit", args=(self.media.id,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/edit.html")
self.assertContains(response, "Filesize")
self.assertNotContains(response, "wagtailadmin/js/draftail.js")
@modify_settings(INSTALLED_APPS={"prepend": "tests.testextends"})
def test_extends(self):
response = self.client.get(reverse("wagtailmedia:edit", args=(self.media.id,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/edit.html")
self.assertNotContains(response, "Filesize")
self.assertContains(response, "sweet-style")
self.assertContains(response, "sweet-code")
self.assertContains(response, "sweet-form-row")
self.assertContains(response, "sweet-stats")
def test_post(self):
# Build a fake file
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
# Submit title change
post_data = {"title": "Test media changed!", "file": fake_file, "duration": 100}
response = self.client.post(
reverse("wagtailmedia:edit", args=(self.media.id,)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media title should be changed
self.assertEqual(
models.Media.objects.get(id=self.media.id).title, "Test media changed!"
)
def test_with_missing_source_file(self):
# Build a fake file
fake_file = ContentFile(b("An ephemeral media"), name="to-be-deleted.mp3")
# Create a new media to delete the source for
media = models.Media.objects.create(
title="Test missing source media", file=fake_file, duration=100
)
media.file.delete(False)
response = self.client.get(reverse("wagtailmedia:edit", args=(media.id,)), {})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/edit.html")
self.assertContains(response, "File not found")
@override_settings(WAGTAILMEDIA={"MEDIA_MODEL": "wagtailmedia_tests.CustomMedia"})
def test_get_with_custom_model(self):
# Build a fake file
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
# Create a media to edit
media = models.get_media_model().objects.create(
title="Test custom media", file=fake_file, duration=100
)
response = self.client.get(reverse("wagtailmedia:edit", args=(media.id,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/edit.html")
# Ensure the form supports file uploads
self.assertContains(response, 'enctype="multipart/form-data"')
# form media should be imported
self.assertContains(response, "wagtailadmin/js/draftail.js")
class TestMediaDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create a media to delete
self.media = models.Media.objects.create(title="Test media", duration=100)
def test_simple(self):
response = self.client.get(
reverse("wagtailmedia:delete", args=(self.media.id,))
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/confirm_delete.html")
def test_delete(self):
# Submit title change
post_data = {"foo": "bar"}
response = self.client.post(
reverse("wagtailmedia:delete", args=(self.media.id,)), post_data
)
# User should be redirected back to the index
self.assertRedirects(response, reverse("wagtailmedia:index"))
# Media should be deleted
self.assertFalse(models.Media.objects.filter(id=self.media.id).exists())
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_usage_link(self):
response = self.client.get(
reverse("wagtailmedia:delete", args=(self.media.id,))
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/confirm_delete.html")
self.assertIn("Used 0 times", str(response.content))
class TestMediaChooserView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
def test_simple(self):
response = self.client.get(reverse("wagtailmedia:chooser"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_data = json.loads(response.content.decode("utf-8"))
self.assertSetEqual(
set(json_data.keys()),
{"html", "step", "error_label", "error_message", "tag_autocomplete_url"},
)
self.assertTemplateUsed(response, "wagtailmedia/chooser/chooser.html")
self.assertEqual(json_data["step"], "chooser")
self.assertEqual(
json_data["tag_autocomplete_url"], reverse("wagtailadmin_tag_autocomplete")
)
# draftail should NOT be a standard JS include on this page
self.assertNotIn("wagtailadmin/js/draftail.js", json_data["html"])
def test_search(self):
response = self.client.get(reverse("wagtailmedia:chooser"), {"q": "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["query_string"], "Hello")
@staticmethod
def make_media():
fake_file = ContentFile(b("A boring example song"), name="song.mp3")
for i in range(50):
media = models.Media(
title="Test " + str(i), duration=100 + i, file=fake_file, type="audio"
)
media.save()
def test_pagination(self):
self.make_media()
response = self.client.get(reverse("wagtailmedia:chooser"), {"p": 2})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/list.html")
# Check that we got the correct page
self.assertEqual(response.context["media_files"].number, 2)
def test_pagination_invalid(self):
self.make_media()
response = self.client.get(
reverse("wagtailmedia:chooser"), {"p": "Hello World!"}
)
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/list.html")
# Check that we got page one
self.assertEqual(response.context["media_files"].number, 1)
def test_pagination_out_of_range(self):
self.make_media()
response = self.client.get(reverse("wagtailmedia:chooser"), {"p": 99999})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/media/list.html")
# Check that we got the last page
self.assertEqual(
response.context["media_files"].number,
response.context["media_files"].paginator.num_pages,
)
def test_construct_queryset_hook_browse(self):
media = models.Media.objects.create(
title="Test media shown",
duration=100,
type="audio",
uploaded_by_user=self.user,
)
models.Media.objects.create(
title="Test media not shown", duration=100, type="audio"
)
def filter_media(media, request):
return media.filter(uploaded_by_user=self.user)
with self.register_hook("construct_media_chooser_queryset", filter_media):
response = self.client.get(reverse("wagtailmedia:chooser"))
self.assertEqual(len(response.context["media_files"]), 1)
self.assertEqual(response.context["media_files"][0], media)
def test_construct_queryset_hook_search(self):
media = models.Media.objects.create(
title="Test media shown",
duration=100,
type="audio",
uploaded_by_user=self.user,
)
models.Media.objects.create(
title="Test media not shown", duration=100, type="audio"
)
def filter_media(media, request):
return media.filter(uploaded_by_user=self.user)
with self.register_hook("construct_media_chooser_queryset", filter_media):
response = self.client.get(reverse("wagtailmedia:chooser"), {"q": "Test"})
self.assertEqual(len(response.context["media_files"]), 1)
self.assertEqual(response.context["media_files"][0], media)
@override_settings(WAGTAILMEDIA={"MEDIA_MODEL": "wagtailmedia_tests.CustomMedia"})
def test_with_custom_model(self):
response = self.client.get(reverse("wagtailmedia:chooser"))
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content.decode())
self.assertEqual(json_data["step"], "chooser")
# custom form fields should be present
self.assertIn('name="media-chooser-upload-fancy_caption"', json_data["html"])
# form media imports should appear on the page
self.assertIn("wagtailadmin/js/draftail.js", json_data["html"])
class TestTypedMediaChooserView(TestCase, WagtailTestUtils):
@classmethod
def setUpTestData(cls):
fake_audio = ContentFile(b("A boring example song"))
fake_audio.name = "song.mp3"
audio = models.Media(
title="Test audio", duration=100, file=fake_audio, type="audio"
)
audio.save()
fake_video = ContentFile(b("An exciting video"))
fake_video.name = "video.mp4"
video = models.Media(
title="Test video", duration=100, file=fake_video, type="video"
)
video.save()
def setUp(self):
self.user = self.login()
def test_audio_chooser(self):
response = self.client.get(
reverse("wagtailmedia:chooser_typed", args=("audio",))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_data = json.loads(response.content.decode("utf-8"))
self.assertSetEqual(
set(json_data.keys()),
{"html", "step", "error_label", "error_message", "tag_autocomplete_url"},
)
self.assertTemplateUsed(response, "wagtailmedia/chooser/chooser.html")
self.assertEqual(json_data["step"], "chooser")
self.assertEqual(
json_data["tag_autocomplete_url"], reverse("wagtailadmin_tag_autocomplete")
)
html = response.json().get("html")
self.assertInHTML("Test audio", html)
self.assertInHTML('<a href="#upload-audio">Upload Audio</a>', html)
self.assertNotInHTML("Test video", html)
self.assertNotInHTML('<a href="#upload-video">Upload Video</a>', html)
def test_video_chooser(self):
response = self.client.get(
reverse("wagtailmedia:chooser_typed", args=("video",))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
html = response.json().get("html")
self.assertInHTML("Test video", html)
self.assertInHTML('<a href="#upload-video">Upload Video</a>', html)
self.assertNotInHTML("Test audio", html)
self.assertNotInHTML('<a href="#upload-audio">Upload Audio</a>', html)
def test_typed_chooser_with_invalid_media_type(self):
with self.assertRaises(NoReverseMatch):
self.client.get(
reverse("wagtailmedia:chooser_typed", args=("subspace-transmission",))
)
class TestMediaChooserViewPermissions(TestCase, WagtailTestUtils):
def setUp(self):
add_media_permission = Permission.objects.get(
content_type__app_label="wagtailmedia", codename="add_media"
)
admin_permission = Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
self.root_collection = Collection.get_first_root_node()
self.evil_plans_collection = self.root_collection.add_child(name="Evil plans")
conspirators_group = Group.objects.create(name="Evil conspirators")
conspirators_group.permissions.add(admin_permission)
GroupCollectionPermission.objects.create(
group=conspirators_group,
collection=self.evil_plans_collection,
permission=add_media_permission,
)
user = get_user_model().objects.create_user(
username="moriarty", email="moriarty@example.com", password="password"
)
user.groups.add(conspirators_group)
fake_file = ContentFile(b("A boring song"), name="test-song.mp3")
media = models.Media(
title="Test",
duration=100,
file=fake_file,
type="audio",
collection=self.root_collection,
)
media.save()
def test_all_permissions_views_root_media(self):
self.login()
response = self.client.get(
reverse("wagtailmedia:chooser"), {"collection_id": self.root_collection.id}
)
self.assertIn("test-song.mp3", str(response.content))
def test_single_collection_permissions_views_nothing(self):
self.client.login(username="moriarty", password="password")
response = self.client.get(
reverse("wagtailmedia:chooser"), {"collection_id": self.root_collection.id}
)
media_add_url = reverse("wagtailmedia:add", args=("media",))
self.assertContains(
response,
f'You haven\'t uploaded any media. Why not <a href="{media_add_url}">upload one now</a>',
)
def test_upload_permission(self):
user = get_user_model().objects.create_user(
username="user", email="user@example.com", password="password"
)
user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
)
user.save()
self.login(user)
response = self.client.get(reverse("wagtailmedia:chooser"))
self.assertEqual(response.context["uploadforms"], {})
class TestMediaChooserChosenView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create a media to choose
self.media = models.Media.objects.create(
title="Test media", file="media.mp3", duration=100
)
def test_simple(self):
response = self.client.get(
reverse("wagtailmedia:media_chosen", args=(self.media.id,))
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
self.assertDictEqual(
json.loads(response.content.decode("utf-8")),
{
"step": "media_chosen",
"result": {
"id": self.media.id,
"title": self.media.title,
"edit_link": reverse("wagtailmedia:edit", args=[self.media.id]),
},
},
)
class TestMediaChooserUploadView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def test_upload_audio(self):
response = self.client.post(
reverse("wagtailmedia:chooser_upload", args=("audio",)),
{
"media-chooser-upload-title": "Test audio",
"media-chooser-upload-file": ContentFile(
b("A boring example"), name="audio.mp3"
),
"media-chooser-upload-duration": "100",
},
)
# Check response
self.assertEqual(response.status_code, 200)
# Check that the audio was created
media_files = models.Media.objects.filter(title="Test audio")
self.assertEqual(media_files.count(), 1)
# Test that fields are populated correctly
media = media_files.first()
self.assertEqual(media.type, "audio")
self.assertEqual(media.duration, 100)
def test_upload_video(self):
response = self.client.post(
reverse("wagtailmedia:chooser_upload", args=("video",)),
{
"media-chooser-upload-title": "Test video",
"media-chooser-upload-file": ContentFile(
b("A boring example"), name="video.avi"
),
"media-chooser-upload-duration": "100",
"media-chooser-upload-width": "640",
"media-chooser-upload-height": "480",
},
)
# Check response
self.assertEqual(response.status_code, 200)
# Check that the video was created
media_files = models.Media.objects.filter(title="Test video")
self.assertEqual(media_files.count(), 1)
# Test that fields are populated correctly
media = media_files.first()
self.assertEqual(media.type, "video")
self.assertEqual(media.duration, 100)
self.assertEqual(media.width, 640)
self.assertEqual(media.height, 480)
def test_upload_no_file_selected(self):
response = self.client.post(
reverse("wagtailmedia:chooser_upload", args=("video",)),
{"media-chooser-upload-title": "Test video"},
)
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailmedia/chooser/chooser.html")
# The video form should have an error
self.assertIn("uploadforms", response.context)
self.assertIn("video", response.context["uploadforms"])
video_form = response.context["uploadforms"]["video"]
self.assertIn("This field is required.", video_form.errors["file"])
self.assertEqual(video_form.instance.title, "Test video")
self.assertEqual(video_form.instance.type, "video")
# the audio form should not have an error
self.assertIn("audio", response.context["uploadforms"])
audio_form = response.context["uploadforms"]["audio"]
self.assertEqual(audio_form.errors, ErrorDict())
self.assertEqual(audio_form.instance.title, "")
self.assertEqual(audio_form.instance.type, "audio")
# try the audio form
response = self.client.post(
reverse("wagtailmedia:chooser_upload", args=("audio",)),
{"media-chooser-upload-title": "Test audio"},
)
audio_form = response.context["uploadforms"]["audio"]
self.assertIn("This field is required.", audio_form.errors["file"])
self.assertEqual(audio_form.instance.title, "Test audio")
self.assertEqual(audio_form.instance.type, "audio")
video_form = response.context["uploadforms"]["video"]
self.assertEqual(video_form.errors, ErrorDict())
self.assertEqual(video_form.instance.title, "")
self.assertEqual(video_form.instance.type, "video")
@override_settings(
DEFAULT_FILE_STORAGE="wagtail.tests.dummy_external_storage.DummyExternalStorage"
)
def test_upload_with_external_storage(self):
response = self.client.post(
reverse("wagtailmedia:chooser_upload", args=("video",)),
{
"media-chooser-upload-title": "Test video",
"media-chooser-upload-file": ContentFile(
b("A boring example"), name="video.avi"
),
"media-chooser-upload-duration": "100",
},
)
# Check response
self.assertEqual(response.status_code, 200)
# Check that the video was created
self.assertTrue(models.Media.objects.filter(title="Test video").exists())
class TestUsageCount(TestCase, WagtailTestUtils):
fixtures = ["test.json"]
def setUp(self):
self.login()
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_unused_media_usage_count(self):
media = models.Media.objects.get(id=1)
self.assertEqual(media.get_usage().count(), 0)
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_used_media_usage_count(self):
media = models.Media.objects.get(id=1)
page = EventPage.objects.get(id=3)
event_page_related_link = EventPageRelatedMedia()
event_page_related_link.page = page
event_page_related_link.link_media = media
event_page_related_link.save()
self.assertEqual(media.get_usage().count(), 1)
def test_usage_count_does_not_appear(self):
media = models.Media.objects.get(id=1)
page = EventPage.objects.get(id=3)
event_page_related_link = EventPageRelatedMedia()
event_page_related_link.page = page
event_page_related_link.link_media = media
event_page_related_link.save()
response = self.client.get(reverse("wagtailmedia:edit", args=(1,)))
self.assertNotContains(response, "Used 1 time")
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_usage_count_appears(self):
media = models.Media.objects.get(id=1)
page = EventPage.objects.get(id=3)
event_page_related_link = EventPageRelatedMedia()
event_page_related_link.page = page
event_page_related_link.link_media = media
event_page_related_link.save()
response = self.client.get(reverse("wagtailmedia:edit", args=(1,)))
self.assertContains(response, "Used 1 time")
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_usage_count_zero_appears(self):
response = self.client.get(reverse("wagtailmedia:edit", args=(1,)))
self.assertContains(response, "Used 0 times")
class TestGetUsage(TestCase, WagtailTestUtils):
fixtures = ["test.json"]
def setUp(self):
self.login()
def test_media_get_usage_not_enabled(self):
media = models.Media.objects.get(id=1)
self.assertEqual(list(media.get_usage()), [])
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_unused_media_get_usage(self):
media = models.Media.objects.get(id=1)
self.assertEqual(list(media.get_usage()), [])
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_used_media_get_usage(self):
media = models.Media.objects.get(id=1)
page = EventPage.objects.get(id=3)
event_page_related_link = EventPageRelatedMedia()
event_page_related_link.page = page
event_page_related_link.link_media = media
event_page_related_link.save()
self.assertTrue(issubclass(Page, type(media.get_usage()[0])))
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_usage_page(self):
media = models.Media.objects.get(id=1)
page = EventPage.objects.get(id=3)
event_page_related_link = EventPageRelatedMedia()
event_page_related_link.page = page
event_page_related_link.link_media = media
event_page_related_link.save()
response = self.client.get(reverse("wagtailmedia:media_usage", args=(1,)))
self.assertContains(response, "Christmas")
@override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
def test_usage_page_no_usage(self):
response = self.client.get(reverse("wagtailmedia:media_usage", args=(1,)))
# There's no usage so there should be no table rows
self.assertRegex(response.content, rb"<tbody>(\s|\n)*</tbody>")
| torchbox/wagtailmedia | tests/test_views.py | Python | bsd-3-clause | 41,981 | ["exciting"] | 7decdc86432909b0e76a867ec74edfc3750402d2fec776b081b7e945fee3e4c3 |
"""
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
DropOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
rect = lambda x:TT.maximum(0., x)
theano.config.allow_gc = False
def get_text_data(state):
def out_format (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
def out_format_valid (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
train_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
seq_len = state['seqlen'],
mode="train",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format,
can_fit=True)
valid_data = LMIterator(
batch_size=state['bs'],
path=state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences = True,
seq_len= state['seqlen'],
mode="valid",
reset =state['reset'],
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
test_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences=True,
seq_len= state['seqlen'],
mode="test",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
if 'wiki' in state['path']:
test_data = None
return train_data, valid_data, test_data
def jobman(state, channel):
# load dataset
rng = numpy.random.RandomState(state['seed'])
# declare the dimensionalies of the input and output
if state['chunks'] == 'words':
state['n_in'] = 10000
state['n_out'] = 10000
else:
state['n_in'] = 50
state['n_out'] = 50
train_data, valid_data, test_data = get_text_data(state)
## BEGIN Tutorial
### Define Theano Input Variables
x = TT.lvector('x')
y = TT.lvector('y')
h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
### Neural Implementation of the Operators: \oplus
#### Word Embedding
emb_words = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inp_nhids']),
activation=eval(state['inp_activ']),
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
rank_n_approx = state['rank_n_approx'],
scale=state['inp_scale'],
sparsity=state['inp_sparse'],
learn_bias = True,
bias_scale=eval(state['inp_bias']),
name='emb_words')
#### Deep Transition Recurrent Layer
rec = eval(state['rec_layer'])(
rng,
eval(state['nhids']),
activation = eval(state['rec_activ']),
#activation = 'TT.nnet.sigmoid',
bias_scale = eval(state['rec_bias']),
scale=eval(state['rec_scale']),
sparsity=eval(state['rec_sparse']),
init_fn=eval(state['rec_init']),
weight_noise=state['weight_noise'],
name='rec')
#### Stiching them together
##### (1) Get the embedding of a word
x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
##### (2) Embedding + Hidden State via DT Recurrent Layer
reset = TT.scalar('reset')
rec_layer = rec(x_emb, n_steps=x.shape[0],
init_state=h0*reset,
no_noise_bias=state['no_noise_bias'],
truncate_gradient=state['truncate_gradient'],
batch_size=1)
## BEGIN Exercise: DOT-RNN
### Neural Implementation of the Operators: \lhd
#### Exercise (1)
#### TODO: Define a layer from the hidden state to the intermediate layer
emb_layer = MultiLayer(
rng,
)
#### Exercise (1)
#### TODO: Define a layer from the input to the intermediate Layer
#### Hidden State: Combine emb_state and emb_words_out
#### Exercise (1)
#### TODO: Define an activation layer
#### Exercise (2)
#### TODO: Define a dropout layer
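    #### Illustrative sketch only (not the official tutorial solution): one way
    #### the exercises above could be filled in, kept commented out so the
    #### skeleton remains an exercise. It reuses the MultiLayer pattern from
    #### emb_words above and the UnaryOp / DropOp classes imported at the top of
    #### this file; the exact constructor arguments for UnaryOp and DropOp are
    #### assumptions and should be checked against the GroundHog sources.
    # emb_state = MultiLayer(
    #     rng,
    #     n_in=eval(state['nhids'])[-1],
    #     n_hids=eval(state['dout_nhid']),
    #     activation=linear,
    #     init_fn=eval(state['dout_init']),
    #     weight_noise=state['weight_noise'],
    #     scale=state['dout_scale'],
    #     sparsity=state['dout_sparse'],
    #     learn_bias=True,
    #     bias_scale=eval(state['dout_bias']),
    #     name='emb_state')
    # emb_words_out = MultiLayer(
    #     rng,
    #     n_in=state['n_in'],
    #     n_hids=eval(state['dout_nhid']),
    #     activation=linear,
    #     init_fn='sample_weights_classic',
    #     weight_noise=state['weight_noise'],
    #     rank_n_approx=state['dout_rank_n_approx'],
    #     scale=state['dout_scale'],
    #     sparsity=state['dout_sparse'],
    #     learn_bias=False,
    #     bias_scale=eval(state['dout_bias']),
    #     name='emb_words_out')
    # outhid_activ = UnaryOp(activation=eval(state['dout_activ']))
    # outhid_dropout = DropOp(rng=rng, dropout=state['dropout'])
    #### With layers like these, the intermediate output expected by the
    #### training/validation models and by sample_fn below would be roughly:
    # outhid = outhid_dropout(outhid_activ(emb_state(rec_layer) + emb_words_out(x)))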
#### Softmax Layer
output_layer = SoftmaxLayer(
rng,
eval(state['dout_nhid']),
state['n_out'],
scale=state['out_scale'],
bias_scale=state['out_bias_scale'],
init_fn="sample_weights_classic",
weight_noise=state['weight_noise'],
sparsity=state['out_sparse'],
sum_over_time=True,
name='out')
### Few Optional Things
#### Direct shortcut from x to y
if state['shortcut_inpout']:
shortcut = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inpout_nhids']),
activations=eval(state['inpout_activ']),
init_fn='sample_weights_classic',
weight_noise = state['weight_noise'],
scale=eval(state['inpout_scale']),
sparsity=eval(state['inpout_sparse']),
learn_bias=eval(state['inpout_learn_bias']),
bias_scale=eval(state['inpout_bias']),
name='shortcut')
#### Learning rate scheduling (1/(1+n/beta))
state['clr'] = state['lr']
def update_lr(obj, cost):
stp = obj.step
if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
time = float(stp - obj.state['lr_start'])
new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
obj.lr = new_lr
if state['lr_adapt']:
rec.add_schedule(update_lr)
### Neural Implementations of the Language Model
#### Training
if state['shortcut_inpout']:
additional_inputs = [rec_layer, shortcut(x)]
else:
additional_inputs = [rec_layer]
##### Exercise (1): Compute the output intermediate layer
##### TODO: Compute the output intermediate layer
##### Exercise (2): Apply Dropout
##### TODO: Apply the dropout layer
train_model = output_layer(outhid,
no_noise_bias=state['no_noise_bias'],
additional_inputs=additional_inputs).train(target=y,
scale=numpy.float32(1./state['seqlen']))
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if state['carry_h0']:
train_model.updates += [(h0, nw_h0)]
#### Validation
h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
rec_layer = rec(emb_words(x, use_noise=False),
n_steps = x.shape[0],
batch_size=1,
init_state=h0val*reset,
use_noise=False)
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
##### Exercise (1):
##### TODO: Compute the output intermediate layer
##### Exercise (2): Apply Dropout
##### TODO: Apply the dropout layer without noise
if state['shortcut_inpout']:
additional_inputs=[rec_layer, shortcut(x, use_noise=False)]
else:
additional_inputs=[rec_layer]
valid_model = output_layer(outhid,
additional_inputs=additional_inputs,
use_noise=False).validate(target=y, sum_over_time=True)
valid_updates = []
if state['carry_h0']:
valid_updates = [(h0val, nw_h0)]
valid_fn = theano.function([x,y, reset], valid_model.cost,
name='valid_fn', updates=valid_updates)
#### Sampling
##### single-step sampling
def sample_fn(word_tm1, h_tm1):
x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
outhid = outhid_dropout(outhid_activ(emb_state(h0, use_noise=False, one_step=True) +
emb_words_out(word_tm1, use_noise=False, one_step=True), one_step=True),
use_noise=False, one_step=True)
word = output_layer.get_sample(state_below=outhid, additional_inputs=[h0], temp=1.)
return word, h0
##### scan for iterating the single-step sampling multiple times
[samples, summaries], updates = scan(sample_fn,
states = [
TT.alloc(numpy.int64(0), state['sample_steps']),
TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
n_steps= state['sample_steps'],
name='sampler_scan')
##### build a Theano function for sampling
sample_fn = theano.function([], [samples],
updates=updates, profile=False, name='sample_fn')
##### Load a dictionary
dictionary = numpy.load(state['dictionary'])
if state['chunks'] == 'chars':
dictionary = dictionary['unique_chars']
else:
dictionary = dictionary['unique_words']
def hook_fn():
sample = sample_fn()[0]
print 'Sample:',
if state['chunks'] == 'chars':
print "".join(dictionary[sample])
else:
for si in sample:
print dictionary[si],
print
### Build and Train a Model
#### Define a model
model = LM_Model(
cost_layer = train_model,
weight_noise_amount=state['weight_noise_amount'],
valid_fn = valid_fn,
clean_before_noise_fn = False,
noise_fn = None,
rng = rng)
if state['reload']:
model.load(state['prefix']+'model.npz')
#### Define a trainer
##### Training algorithm (SGD)
if state['moment'] < 0:
algo = SGD(model, state, train_data)
else:
algo = SGD_m(model, state, train_data)
##### Main loop of the trainer
main = MainLoop(train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
train_cost = False,
hooks = hook_fn,
validate_postprocess = eval(state['validate_postprocess']))
## Run!
main.main()
if __name__=='__main__':
state = {}
# complete path to data (cluster specific)
state['seqlen'] = 100
state['path']= "/Users/KyunghyunCho/GroundHog/data/pentree_char_and_word.npz"
state['dictionary']= "/Users/KyunghyunCho/GroundHog/data/dictionaries.npz"
state['chunks'] = 'chars'
state['seed'] = 123
    # Flag that you don't need to change. It says what to do if the cost
    # becomes nan; you could raise an error instead, but 'warn' is a safe default.
state['on_nan'] = 'warn'
# DATA
    # For wikipedia the validation set is extremely large, which wastes a lot
    # of time. This value is only used for the validation set, and IMHO should
    # be something like seqlen * 10000 (i.e. the validation should be only
    # 10000 steps).
state['reset'] = -1
    # For music / word level I think 50 is a good value. For character level
    # this should be at least 100 (I think there are problems reaching state
    # of the art otherwise). Note that most people use 200!
    # The job stops when the learning rate declines to this value. This can be
    # useful, because sometimes it is hopeless to wait for the validation error
    # to get below minerr, or for the time to expire.
state['minlr'] = float(5e-7)
# Layers
# Input
# Input weights are sampled from a gaussian with std=scale; this is the
# standard way to initialize
state['rank_n_approx'] = 0
state['inp_nhids'] = '[200]'
state['inp_activ'] = '[linear]'
state['inp_bias'] = '[0.]'
state['inp_sparse']= -1 # dense
state['inp_scale'] = .1
# This is for the output weights
state['out_scale'] = .1
state['out_bias_scale'] = -.5
state['out_sparse'] = -1
state['dout_nhid'] = '200'
state['dout_activ'] = '"TT.nnet.sigmoid"'
state['dout_sparse']= 20
state['dout_scale'] = 1.
state['dout_bias'] = '[0]'
state['dout_init'] = "'sample_weights'"
state['dout_rank_n_approx'] = 0
state['dropout'] = .5
# HidLayer
    # Hidden units for the internal layers of the DT-RNN. Having a single
    # value results in a standard RNN
state['nhids'] = '[100, 100]'
# Activation of each layer
state['rec_activ'] = '"TT.nnet.sigmoid"'
state['rec_bias'] = '.0'
state['rec_sparse'] ='20'
state['rec_scale'] = '1.'
# sample_weights - you rescale the weights such that the largest
# singular value is scale
# sample_weights_classic : just sample weights from a gaussian with std
# equal to scale
state['rec_init'] = "'sample_weights'"
state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
# SGD params
state['bs'] = 1 # the size of the minibatch
state['lr'] = 1. # initial learning rate
state['cutoff'] = 1. # threshold for gradient rescaling
state['moment'] = 0.995 #-.1 # momentum
# Do not optimize these
state['weight_noise'] = True # white Gaussian noise in weights
state['weight_noise_amount'] = 0.075 # standard deviation
# maximal number of updates
state['loopIters'] = int(1e8)
# maximal number of minutes to wait until killing job
state['timeStop'] = 48*60 # 48 hours
# Construct linear connections from input to output. These are factored
# (like the rank_n) to deal with the possible high dimensionality of the
# input, but it is a linear projection that feeds into the softmax
state['shortcut_inpout'] = False
state['shortcut_rank'] = 200
# Main Loop
    # Make this a decently large value. Otherwise you waste a lot of memory
    # keeping track of the training error (and other things) at each step,
    # and the stdout becomes extremely large
state['trainFreq'] = 100
state['hookFreq'] = 5000
state['validFreq'] = 1000
state['saveFreq'] = 15 # save every 15 minutes
state['prefix'] = 'model_' # prefix of the save files
state['reload'] = False # reload
state['overwrite'] = 1
    # Threshold should be 1.004 for PPL; for entropy (which is what everything
    # returns) it should be much smaller. The running value is 1.0002.
    # We should not hyperoptimize this
state['divide_lr'] = 2.
state['cost_threshold'] = 1.0002
state['patience'] = 1
state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
state['truncate_gradient'] = 80 # truncated BPTT
state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
state['lr_beta'] = 10*1900.
state['lr_start'] = 'on_error'
state['no_noise_bias'] = True # do not use weight noise for biases
state['carry_h0'] = True # carry over h0 across updates
state['sample_steps'] = 80
# Do not change these
state['minerr'] = -1
state['shift'] = 1 # n-step forward prediction
state['cutoff_rescale_length'] = False
jobman(state, None)
| sjtufs/GroundHog | tutorials/DT_RNN_Tut_Ex_Skeleton.py | Python | bsd-3-clause | 15,646 | ["Gaussian"] | 51c943820d6a9625594ccbcf4e69cd1234d902b64254a31955d8331f10dc9581 |
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print __doc__
from scipy import linalg
import numpy as np
import pylab as pl
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap('red_blue_classes',
{'red' : [(0, 1, 1), (1, 0.7, 0.7)],
'green' : [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue' : [(0, 0.7, 0.7), (1, 1, 1)]
})
pl.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = pl.subplot(2, 2, fig_index)
if fig_index == 1:
pl.title('Linear Discriminant Analysis')
pl.ylabel('Fixed covariance')
elif fig_index == 2:
pl.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
pl.ylabel('Different covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[tp0 != True]
X1_tp, X1_fp = X1[tp1], X1[tp1 != True]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
pl.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
pl.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
pl.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
pl.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = pl.xlim()
y_min, y_max = pl.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
pl.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
pl.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
pl.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
pl.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1]/u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA()
y_pred = lda.fit(X, y, store_covariance=True).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
pl.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
pl.axis('tight')
pl.suptitle('LDA vs QDA')
pl.show()
| joshbohde/scikit-learn | examples/plot_lda_qda.py | Python | bsd-3-clause | 4,715 | ["Gaussian"] | 4dab99f348a2a75f2c30cdaf8b734f1e486c997d1be37b1086e5c5ff86271d96 |